Diffstat (limited to 'tests/validation/fixtures')
115 files changed, 10852 insertions, 2794 deletions
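Two mechanical changes account for most of the churn in the hunks below: fixture setup() methods drop their variadic template <typename...> prefix, and the two-argument ARM_COMPUTE_EXPECT(cond, framework::LogLevel::ERRORS) checks around tensor allocation are replaced by ARM_COMPUTE_ASSERT(cond). A condensed before/after sketch of that recurring allocation pattern (the tensor name src is illustrative, not tied to any one fixture):

    // Before: expectation macro carrying an explicit log level
    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    src.allocator()->allocate();
    ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);

    // After: terser assertion macro, log level implied
    ARM_COMPUTE_ASSERT(src.info()->is_resizable());
    src.allocator()->allocate();
    ARM_COMPUTE_ASSERT(!src.info()->is_resizable());

Most of the hunks that follow apply this same substitution verbatim.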
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h index 91b43f0f24..a24ba8913e 100644 --- a/tests/validation/fixtures/ActivationLayerFixture.h +++ b/tests/validation/fixtures/ActivationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021, 2023-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -47,12 +47,7 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ActivationValidationGenericFixture : public framework::Fixture { public: - ActivationValidationGenericFixture() - : _target(parameters->get_ctx<TensorType>()) - { - } - template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info) { ActivationLayerInfo info(function, alpha_beta, alpha_beta); @@ -120,29 +115,28 @@ protected: TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info) { - auto ctx = parameters->get_ctx<TensorType>(); // Create tensors - TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW, ctx); - TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW, ctx); + TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW); + TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW); // Create and configure function - FunctionType act_layer(ctx); + FunctionType act_layer; TensorType *dst_ptr = _in_place ? 
nullptr : &dst; act_layer.configure(&src, dst_ptr, info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); if(!_in_place) { dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); } // Fill tensors @@ -234,7 +228,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ActivationValidationFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type) { ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo()); @@ -245,7 +238,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ActivationValidationQuantizedFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info) { ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info); @@ -255,4 +247,4 @@ public: } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_ACTIVATIONLAYERFIXTURE_H diff --git a/tests/validation/fixtures/AddMulAddFixture.h b/tests/validation/fixtures/AddMulAddFixture.h new file mode 100644 index 0000000000..d13fef2f02 --- /dev/null +++ b/tests/validation/fixtures/AddMulAddFixture.h @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/ActivationLayer.h" +#include "tests/validation/reference/ArithmeticOperations.h" +#include "tests/validation/reference/DequantizationLayer.h" +#include "tests/validation/reference/PixelWiseMultiplication.h" +#include "tests/validation/reference/QuantizationLayer.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class AddMulAddGenericFixture : public framework::Fixture +{ +public: + void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out) + { + compute_target(shape, data_type, act_info, interm_out); + } + +protected: + template <typename U> + void fill(U &&tensor, int i, DataType data_type) + { + switch(data_type) + { + case DataType::F32: + library->fill_tensor_uniform(tensor, i, -10.f, 10.f); + break; + case DataType::F16: + library->fill_tensor_uniform(tensor, i, -1.f, 1.f); + break; + default: + library->fill_tensor_uniform(tensor, i); + break; + } + } + + void compute_target(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out) + { + TensorShape b_shape(shape.x()); + + // Create tensors + TensorType input1 = create_tensor<TensorType>(shape, data_type, 1, _input1_qinfo); + TensorType input2 = create_tensor<TensorType>(shape, data_type, 1, _input2_qinfo); + TensorType bn_mul = create_tensor<TensorType>(b_shape, data_type, 1, _bn_mul_qinfo); + TensorType bn_add = create_tensor<TensorType>(b_shape, data_type, 1, _bn_add_qinfo); + TensorType add_output = create_tensor<TensorType>(shape, data_type, 1, _add_output_qinfo); + TensorType final_output = create_tensor<TensorType>(shape, data_type, 1, _final_output_qinfo); + + // Create and configure function + FunctionType add_mul_add; + ARM_COMPUTE_ERROR_THROW_ON(add_mul_add.validate(input1.info(), input2.info(), bn_mul.info(), + bn_add.info(), interm_out ? add_output.info() : nullptr, final_output.info(), + ConvertPolicy::SATURATE, act_info)); + + add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, interm_out ? 
&add_output : nullptr, + &final_output, ConvertPolicy::SATURATE, act_info); + + // Allocate tensors + input1.allocator()->allocate(); + input2.allocator()->allocate(); + bn_mul.allocator()->allocate(); + bn_add.allocator()->allocate(); + + if(interm_out) + { + add_output.allocator()->allocate(); + } + + final_output.allocator()->allocate(); + + // Fill tensors + fill(AccessorType(input1), 0, data_type); + fill(AccessorType(input2), 1, data_type); + fill(AccessorType(bn_mul), 2, data_type); + fill(AccessorType(bn_add), 3, data_type); + + // Compute function + add_mul_add.run(); + + _target = std::move(final_output); + + if(interm_out) + { + _interm_target = std::move(add_output); + } + } + + TensorType _target{}; + TensorType _interm_target{}; + SimpleTensor<T> _reference{}; + SimpleTensor<T> _interm_reference{}; + + QuantizationInfo _input1_qinfo{}; + QuantizationInfo _input2_qinfo{}; + QuantizationInfo _bn_mul_qinfo{}; + QuantizationInfo _bn_add_qinfo{}; + QuantizationInfo _add_output_qinfo{}; + QuantizationInfo _final_output_qinfo{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out> +class AddMulAddFloatValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>; + + void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info) + { + Parent::setup(shape, data_type, act_info, interm_out); + compute_reference(shape, data_type, act_info); + } + + // Compute Reference is moved outside of the generic fixture because with the quantized data types, + // it becomes a very different implementation with intermediate tensors' data types being always float. + // This way the reference calculations are more readable and the size of the classes will be smaller + // due to unrepeated fill() and target() methods.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info) + { + TensorShape b_shape(shape.x()); + + // Create reference + SimpleTensor<T> input1{ shape, data_type }; + SimpleTensor<T> input2{ shape, data_type }; + SimpleTensor<T> bn_mul{ b_shape, data_type }; + SimpleTensor<T> bn_add{ b_shape, data_type }; + SimpleTensor<T> add_output{ shape, data_type, 1 }; + + SimpleTensor<T> bn_mul_out{ shape, data_type }; + SimpleTensor<T> bn_add_out{ shape, data_type }; + + // Fill reference + Parent::fill(input1, 0, data_type); + Parent::fill(input2, 1, data_type); + Parent::fill(bn_mul, 2, data_type); + Parent::fill(bn_add, 3, data_type); + + reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, input1, input2, add_output, ConvertPolicy::SATURATE); + bn_mul_out = reference::pixel_wise_multiplication<T, T, T>(add_output, bn_mul, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, data_type); + reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, bn_mul_out, bn_add, bn_add_out, ConvertPolicy::SATURATE); + + if(interm_out) + { + Parent::_interm_reference = std::move(add_output); + } + + if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY) + { + Parent::_reference = reference::activation_layer(bn_add_out, act_info); + } + else + { + Parent::_reference = std::move(bn_add_out); + } + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out> +class AddMulAddQuantizedValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>; + + void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info, + QuantizationInfo input1_qinfo, QuantizationInfo input2_qinfo, QuantizationInfo bn_mul_qinfo, + QuantizationInfo bn_add_qinfo, QuantizationInfo add_output_qinfo, QuantizationInfo final_output_qinfo) + { + // Quantization arguments moved to class attributes to prevent long function declarations + Parent::_input1_qinfo = input1_qinfo; + Parent::_input2_qinfo = input2_qinfo; + Parent::_bn_mul_qinfo = bn_mul_qinfo; + Parent::_bn_add_qinfo = bn_add_qinfo; + Parent::_add_output_qinfo = add_output_qinfo; + Parent::_final_output_qinfo = final_output_qinfo; + + Parent::setup(shape, data_type, act_info, interm_out); + compute_reference(shape, data_type, act_info); + } + + // Compute Reference is moved outside of the generic fixture because with the quantized data types, + // it becomes a very different implementation with intermediate tensors' data types being always float. + // This way the reference calculations are more readable and the size of the classes will be smaller + // due to unrepeated fill() and target() methods.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info) + { + TensorShape b_shape(shape.x()); + + // Create reference + SimpleTensor<T> input1{ shape, data_type, 1, Parent::_input1_qinfo }; + SimpleTensor<T> input2{ shape, data_type, 1, Parent::_input2_qinfo }; + SimpleTensor<T> bn_mul{ b_shape, data_type, 1, Parent::_bn_mul_qinfo }; + SimpleTensor<T> bn_add{ b_shape, data_type, 1, Parent::_bn_add_qinfo }; + + // Fill input tensors + Parent::fill(input1, 0, data_type); + Parent::fill(input2, 1, data_type); + Parent::fill(bn_mul, 2, data_type); + Parent::fill(bn_add, 3, data_type); + + SimpleTensor<float> input1_dequantized = reference::dequantization_layer<float>(input1); + SimpleTensor<float> input2_dequantized = reference::dequantization_layer<float>(input2); + SimpleTensor<float> bn_mul_dequantized = reference::dequantization_layer<float>(bn_mul); + SimpleTensor<float> bn_add_dequantized = reference::dequantization_layer<float>(bn_add); + + SimpleTensor<float> add_output_dequantized{ shape, DataType::F32 }; + SimpleTensor<float> bn_add_out_dequantized{ shape, DataType::F32 }; + + reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, input1_dequantized, input2_dequantized, add_output_dequantized, ConvertPolicy::SATURATE); + SimpleTensor<float> bn_mul_out_dequantized = reference::pixel_wise_multiplication<float, float, float>(add_output_dequantized, bn_mul_dequantized, 1.f, ConvertPolicy::SATURATE, + RoundingPolicy::TO_NEAREST_UP, DataType::F32); + reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, bn_mul_out_dequantized, bn_add_dequantized, bn_add_out_dequantized, ConvertPolicy::SATURATE); + + if(interm_out) + { + Parent::_interm_reference = reference::quantization_layer<float, T>(add_output_dequantized, data_type, Parent::_add_output_qinfo); + } + + if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY) + { + SimpleTensor<T> ref = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo); + Parent::_reference = reference::activation_layer(ref, act_info); + } + else + { + Parent::_reference = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo); + } + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H diff --git a/tests/validation/fixtures/ArgMinMaxFixture.h b/tests/validation/fixtures/ArgMinMaxFixture.h index f140c9846b..7a823568a8 100644 --- a/tests/validation/fixtures/ArgMinMaxFixture.h +++ b/tests/validation/fixtures/ArgMinMaxFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -42,15 +42,14 @@ namespace test { namespace validation { -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> class ArgMinMaxValidationBaseFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info) + void setup(TensorShape shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info) { - _target = compute_target(shape, data_type, axis, op, q_info); - _reference = compute_reference(shape, data_type, axis, op, q_info); + _target = compute_target(shape, input_type, output_type, axis, op, q_info); + _reference = compute_reference(shape, input_type, output_type, axis, op, q_info); } protected: @@ -80,7 +79,7 @@ protected: case DataType::QASYMM8: { std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, 0); break; @@ -88,7 +87,7 @@ protected: case DataType::QASYMM8_SIGNED: { std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, 0); break; @@ -98,25 +97,25 @@ protected: } } - TensorType compute_target(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info) + TensorType compute_target(TensorShape &src_shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info) { // Create tensors - TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, q_info); - TensorType dst; + TensorType src = create_tensor<TensorType>(src_shape, input_type, 1, q_info); + TensorType dst = create_tensor<TensorType>(compute_output_shape(src_shape, axis), output_type, 1, q_info); // Create and configure function FunctionType arg_min_max_layer; arg_min_max_layer.configure(&src, axis, &dst, op); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -127,41 +126,43 @@ protected: return dst; } - SimpleTensor<int32_t> compute_reference(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo q_info) + TensorShape compute_output_shape(const TensorShape &src_shape, int axis) + { + return arm_compute::misc::shape_calculator::compute_reduced_shape(src_shape, axis, false); + } + + SimpleTensor<T2> compute_reference(TensorShape &src_shape, DataType input_type, DataType output_type, int axis, 
ReductionOperation op, QuantizationInfo q_info) { // Create reference - SimpleTensor<T> src{ src_shape, data_type, 1, q_info }; + SimpleTensor<T1> src{ src_shape, input_type, 1, q_info }; // Fill reference fill(src); - TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(src_shape, axis, false); - return reference::reduction_operation<T, int32_t>(src, output_shape, axis, op); + return reference::reduction_operation<T1, T2>(src, compute_output_shape(src_shape, axis), axis, op, output_type); } - TensorType _target{}; - SimpleTensor<int32_t> _reference{}; + TensorType _target{}; + SimpleTensor<T2> _reference{}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ArgMinMaxValidationQuantizedFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> +class ArgMinMaxValidationQuantizedFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type, int axis, ReductionOperation op, QuantizationInfo quantization_info) + void setup(const TensorShape &shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo quantization_info) { - ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, quantization_info); + ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, input_type, output_type, axis, op, quantization_info); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ArgMinMaxValidationFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> +class ArgMinMaxValidationFixture : public ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type, int axis, ReductionOperation op) + void setup(const TensorShape &shape, DataType input_type, DataType output_type, int axis, ReductionOperation op) { - ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, QuantizationInfo()); + ArgMinMaxValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, input_type, output_type, axis, op, QuantizationInfo()); } }; } // namespace validation diff --git a/tests/validation/fixtures/ArithmeticDivisionFixture.h b/tests/validation/fixtures/ArithmeticDivisionFixture.h index 782939a960..e11a386130 100644 --- a/tests/validation/fixtures/ArithmeticDivisionFixture.h +++ b/tests/validation/fixtures/ArithmeticDivisionFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionBroadcastValidationFixture : public framework::Fixture { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type) { _target = compute_target(shape0, shape1, data_type); @@ -73,18 +72,18 @@ protected: FunctionType add; add.configure(&ref_src1, &ref_src2, &dst); - ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(ref_src1), 0); @@ -117,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionValidationFixture : public ArithmeticDivisionBroadcastValidationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type) { ArithmeticDivisionBroadcastValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type); diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h index 9ba7bd3ef7..0785af1151 100644 --- a/tests/validation/fixtures/ArithmeticOperationsFixture.h +++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,16 +45,14 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticOperationGenericFixture : public framework::Fixture { public: - template <typename...> - void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, - DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool in_place) + void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace) { - _op = op; - _act_info = act_info; - _in_place = in_place; - _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out); - _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out); + _op = op; + _act_info = act_info; + _is_inplace = is_inplace; + _target = compute_target(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out); + _reference = compute_reference(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out); } protected: @@ -64,31 +62,55 @@ protected: library->fill_tensor_uniform(tensor, i); } - TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, + TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) { // Create tensors - TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0); - TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1); - TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out); - TensorType *dst_to_use = _in_place ? 
&ref_src1 : &dst; + const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1); + TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type, 1, qinfo0); + TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type, 1, qinfo1); + TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, qinfo_out); + + // Check whether to do in-place computation and whether the inputs are broadcast compatible + TensorType *actual_dst = &dst; + if(_is_inplace) + { + bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out); + bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out); + bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace); + ARM_COMPUTE_ASSERT(do_in_place); + + if(src1_is_inplace) + { + actual_dst = &ref_src1; + } + else + { + actual_dst = &ref_src2; + } + } // Create and configure function FunctionType arith_op; - arith_op.configure(&ref_src1, &ref_src2, dst_to_use, convert_policy, _act_info); + arith_op.configure(&ref_src1, &ref_src2, actual_dst, convert_policy, _act_info); - ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst_to_use->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable()); // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); - dst_to_use->allocator()->allocate(); - ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst_to_use->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable()); + + // If not doing in-place computation, we still need to allocate the original dst + if(!_is_inplace) + { + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + dst.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + } // Fill tensors fill(AccessorType(ref_src1), 0); @@ -97,50 +119,40 @@ protected: // Compute function arith_op.run(); - if(_in_place) - { - return ref_src1; - } - return dst; + return std::move(*actual_dst); } - SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, - DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, + SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) { - // current in-place implementation only supports same metadata of input and output tensors. - // By ignoring output quantization information here, we can make test cases implementation much simpler. - QuantizationInfo output_qinfo = _in_place ?
qinfo0 : qinfo_out; - // Create reference - SimpleTensor<T> ref_src1{ shape0, data_type0, 1, qinfo0 }; - SimpleTensor<T> ref_src2{ shape1, data_type1, 1, qinfo1 }; - SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, output_qinfo }; + SimpleTensor<T> ref_src1{ shape0, data_type, 1, qinfo0 }; + SimpleTensor<T> ref_src2{ shape1, data_type, 1, qinfo1 }; + SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), data_type, 1, qinfo_out }; // Fill reference fill(ref_src1, 0); fill(ref_src2, 1); auto result = reference::arithmetic_operation<T>(_op, ref_src1, ref_src2, ref_dst, convert_policy); - return _act_info.enabled() ? reference::activation_layer(result, _act_info, output_qinfo) : result; + return _act_info.enabled() ? reference::activation_layer(result, _act_info, qinfo_out) : result; } TensorType _target{}; SimpleTensor<T> _reference{}; reference::ArithmeticOperation _op{ reference::ArithmeticOperation::ADD }; ActivationLayerInfo _act_info{}; - bool _in_place{}; + bool _is_inplace{}; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T> class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace); } }; @@ -148,11 +160,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticAdditionValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace); } }; @@ -160,11 +171,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticAdditionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const 
TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -172,11 +182,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticAdditionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -184,13 +193,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticAdditionValidationQuantizedFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type0, data_type1, - output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy, + qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace); } }; @@ -198,13 +205,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticAdditionValidationQuantizedBroadcastFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType 
output_data_type, - ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, + bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, - data_type0, data_type1, output_data_type, convert_policy, - qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), false); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::ADD, shape0, shape1, data_type, convert_policy, + qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace); } }; @@ -212,12 +217,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionBroadcastValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, bool in_place) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, - data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace); } }; @@ -225,13 +228,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionBroadcastValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, - bool in_place) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, + bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, - data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -239,12 +240,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionValidationFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, 
DataType output_data_type, ConvertPolicy convert_policy, bool in_place) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, - data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace); } }; @@ -252,12 +251,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionValidationFloatFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool in_place) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, ActivationLayerInfo act_info, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, - data_type0, data_type1, output_data_type, convert_policy, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -265,14 +262,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionValidationQuantizedFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool in_place) + void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, - data_type0, data_type1, output_data_type, - convert_policy, qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy, + qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace); } }; @@ -280,13 +274,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticSubtractionValidationQuantizedBroadcastFixture : public ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - ConvertPolicy convert_policy, QuantizationInfo qinfo0, 
QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool in_place) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, + bool is_inplace) { - ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, - data_type0, data_type1, output_data_type, convert_policy, - qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), in_place); + ArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(reference::ArithmeticOperation::SUB, shape0, shape1, data_type, convert_policy, + qinfo0, qinfo1, qinfo_out, ActivationLayerInfo(), is_inplace); } }; } // namespace validation diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h index f5a5420df3..54a0ed9e09 100644 --- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h +++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BatchNormalizationLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout) { _data_type = dt; @@ -111,12 +110,12 @@ protected: TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr; norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(gamma.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -126,12 +125,12 @@ protected: beta.allocator()->allocate(); gamma.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!gamma.info()->is_resizable()); // Fill tensors 
fill(AccessorType(src), AccessorType(mean), AccessorType(var), AccessorType(beta), AccessorType(gamma)); diff --git a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h index ccacfeb062..161eeb0ef4 100644 --- a/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h +++ b/tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename ConvolutionFuncti class BatchNormalizationLayerFusionValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation, bool use_conv_b, bool use_beta, bool use_gamma, float epsilon, DataType dt, DataLayout data_layout) { @@ -110,16 +109,16 @@ protected: fuse_fn.configure(&conv_w, &bn_mean, &bn_var, &fused_w, &fused_b, conv_b_ptr, beta_ptr, gamma_ptr, epsilon); conv_fn.configure(&src, &fused_w, &fused_b, &dst, info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(conv_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(conv_b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bn_var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(fused_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(fused_b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(conv_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(conv_b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bn_mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bn_var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bn_beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bn_gamma.info()->is_resizable()); + ARM_COMPUTE_ASSERT(fused_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(fused_b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -133,16 +132,16 @@ protected: fused_b.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!conv_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!conv_b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bn_var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!fused_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!fused_b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + 
ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!conv_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!conv_b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bn_mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bn_var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bn_beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bn_gamma.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!fused_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!fused_b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), diff --git a/tests/validation/fixtures/BatchToSpaceLayerFixture.h b/tests/validation/fixtures/BatchToSpaceLayerFixture.h index 796afd1c5d..56a6109dbc 100644 --- a/tests/validation/fixtures/BatchToSpaceLayerFixture.h +++ b/tests/validation/fixtures/BatchToSpaceLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,6 +24,7 @@ #ifndef ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE #define ARM_COMPUTE_TEST_BATCH_TO_SPACE_LAYER_FIXTURE +#include "arm_compute/core/Helpers.h" #include "tests/Globals.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" @@ -39,11 +40,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BatchToSpaceLayerValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout) + void setup(const TensorShape &input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type, DataLayout data_layout) { - _target = compute_target(input_shape, block_shape_shape, output_shape, data_type, data_layout); - _reference = compute_reference(input_shape, block_shape_shape, output_shape, data_type); + _target = compute_target(input_shape, block_shape, crop_info, output_shape, data_type, data_layout); + _reference = compute_reference(input_shape, block_shape, crop_info, output_shape, data_type); } protected: @@ -56,9 +56,10 @@ protected: DistributionType distribution{ T(-1.0f), T(1.0f) }; library->fill(tensor, distribution, i); } - TensorType compute_target(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, + TensorType compute_target(TensorShape input_shape, const std::vector<int32_t> &block_shape, const CropInfo &crop_info, TensorShape output_shape, DataType data_type, DataLayout data_layout) { + ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only support batch to 2D space (x, y) for now if(data_layout == DataLayout::NHWC) { permute(input_shape, PermutationVector(2U, 0U, 1U)); @@ -66,64 +67,49 @@ protected: } // Create tensors - TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); - TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32); - TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); + TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout); // Create and configure function FunctionType batch_to_space; - batch_to_space.configure(&input, &block_shape, &output); + batch_to_space.configure(&input, 
block_shape.at(0), block_shape.at(1), &output, crop_info); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); - block_shape.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); - { - auto block_shape_data = AccessorType(block_shape); - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - for(unsigned int i = 0; i < block_shape_shape.x(); ++i) - { - static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i + idx_width] / input_shape[i + idx_width]; - } - } // Compute function batch_to_space.run(); return output; } - SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape, - const TensorShape &output_shape, DataType data_type) + SimpleTensor<T> compute_reference(const TensorShape &input_shape, const std::vector<int32_t> &block_shape, + const CropInfo &crop_info, const TensorShape &output_shape, DataType data_type) { + ARM_COMPUTE_ERROR_ON(block_shape.size() != 2U); // Only support batch to 2D space (x, y) for now // Create reference - SimpleTensor<T> input{ input_shape, data_type }; - SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 }; + SimpleTensor<T> input{ input_shape, data_type }; // Fill reference fill(input, 0); - for(unsigned int i = 0; i < block_shape_shape.x(); ++i) - { - block_shape[i] = output_shape[i] / input_shape[i]; - } // Compute reference - return reference::batch_to_space(input, block_shape, output_shape); + return reference::batch_to_space(input, block_shape, crop_info, output_shape); } TensorType _target{}; SimpleTensor<T> _reference{}; }; + } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/fixtures/BitwiseAndFixture.h b/tests/validation/fixtures/BitwiseAndFixture.h index 6c8e1b1f6e..745a34058e 100644 --- a/tests/validation/fixtures/BitwiseAndFixture.h +++ b/tests/validation/fixtures/BitwiseAndFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BitwiseAndValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -69,17 +68,17 @@ protected: bitwise_and.configure(&src1, &src2, &dst); - ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src1.allocator()->allocate(); src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src1), 0); diff --git a/tests/validation/fixtures/BitwiseNotFixture.h b/tests/validation/fixtures/BitwiseNotFixture.h index c6affcfad6..bdfd255156 100644 --- a/tests/validation/fixtures/BitwiseNotFixture.h +++ b/tests/validation/fixtures/BitwiseNotFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BitwiseNotValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -68,14 +67,14 @@ protected: bitwise_not.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/BitwiseOrFixture.h b/tests/validation/fixtures/BitwiseOrFixture.h index a40f635b9e..03560e0171 100644 --- a/tests/validation/fixtures/BitwiseOrFixture.h +++ b/tests/validation/fixtures/BitwiseOrFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BitwiseOrValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -69,17 +68,17 @@ protected: bitwise_or.configure(&src1, &src2, &dst); - ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src1.allocator()->allocate(); src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src1), 0); diff --git a/tests/validation/fixtures/BitwiseXorFixture.h b/tests/validation/fixtures/BitwiseXorFixture.h index c103033fe6..4872b231a5 100644 --- a/tests/validation/fixtures/BitwiseXorFixture.h +++ b/tests/validation/fixtures/BitwiseXorFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BitwiseXorValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -69,17 +68,17 @@ protected: bitwise_xor.configure(&src1, &src2, &dst); - ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src1.allocator()->allocate(); src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src1), 0); diff --git a/tests/validation/fixtures/BoundingBoxTransformFixture.h b/tests/validation/fixtures/BoundingBoxTransformFixture.h index 7155848b4d..03edaeab16 100644 --- a/tests/validation/fixtures/BoundingBoxTransformFixture.h +++ b/tests/validation/fixtures/BoundingBoxTransformFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -102,7 +102,6 @@ class BoundingBoxTransformGenericFixture : public framework::Fixture public: using TDeltas = typename std::conditional<std::is_same<typename std::decay<T>::type, uint16_t>::value, uint8_t, T>::type; - template <typename...> void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type, QuantizationInfo deltas_qinfo) { const bool is_qasymm16 = data_type == DataType::QASYMM16; @@ -157,17 +156,17 @@ protected: FunctionType bbox_transform; bbox_transform.configure(&boxes, &pred_boxes, &deltas, bbox_info); - ARM_COMPUTE_EXPECT(deltas.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(boxes.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(pred_boxes.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(deltas.info()->is_resizable()); + ARM_COMPUTE_ASSERT(boxes.info()->is_resizable()); + ARM_COMPUTE_ASSERT(pred_boxes.info()->is_resizable()); // Allocate tensors deltas.allocator()->allocate(); boxes.allocator()->allocate(); pred_boxes.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!deltas.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!boxes.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!deltas.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!boxes.info()->is_resizable()); // Fill tensors TensorShape img_shape(bbox_info.scale() * bbox_info.img_width(), bbox_info.scale() * bbox_info.img_height()); @@ -215,7 +214,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BoundingBoxTransformFixture : public BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type) { BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(deltas_shape, info, data_type, QuantizationInfo()); @@ -228,7 +226,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class BoundingBoxTransformQuantizedFixture : public BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type, QuantizationInfo deltas_qinfo) { BoundingBoxTransformGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(deltas_shape, info, data_type, deltas_qinfo); diff --git a/tests/validation/fixtures/CastFixture.h b/tests/validation/fixtures/CastFixture.h index c9764af37c..e9d624e6f3 100644 --- a/tests/validation/fixtures/CastFixture.h +++ b/tests/validation/fixtures/CastFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,7 +36,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class CastValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy) { _target = compute_target(shape, dt_in, dt_out, policy); @@ -86,6 +85,16 @@ protected: library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max)); break; } + case DataType::U64: + { + library->fill_tensor_uniform(tensor, i, static_cast<uint64_t>(unsigned_min), static_cast<uint64_t>(unsigned_max)); + break; + } + case DataType::S64: + { + library->fill_tensor_uniform(tensor, i, static_cast<int64_t>(signed_min), static_cast<int64_t>(signed_max)); + break; + } default: ARM_COMPUTE_ERROR("NOT SUPPORTED!"); } @@ -106,15 +115,15 @@ protected: FunctionType cast; cast.configure(&src, &dst, policy); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, dt_in, dt_out); diff --git a/tests/validation/fixtures/ChannelShuffleLayerFixture.h b/tests/validation/fixtures/ChannelShuffleLayerFixture.h index de718fbbaa..530dba3893 100644 --- a/tests/validation/fixtures/ChannelShuffleLayerFixture.h +++ b/tests/validation/fixtures/ChannelShuffleLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ChannelShuffleLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, unsigned int num_groups, DataType data_type, DataLayout data_layout) { _target = compute_target(shape, data_type, num_groups, data_layout); @@ -75,15 +74,15 @@ protected: FunctionType channel_shuffle_func; channel_shuffle_func.configure(&src, &dst, num_groups); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/Col2ImFixture.h b/tests/validation/fixtures/Col2ImFixture.h index f8673af38a..4d56d607b7 100644 --- a/tests/validation/fixtures/Col2ImFixture.h +++ b/tests/validation/fixtures/Col2ImFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,10 +45,9 @@ namespace validation using namespace arm_compute::misc::shape_calculator; template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool batch_size_on_z> -class Col2ImValidationFixture : public framework::Fixture +class Col2ImOpValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, const unsigned int convolved_width, unsigned int convolved_height, unsigned int num_groups, DataType data_type) { const Size2D convolved_dims(convolved_width, convolved_height); @@ -74,23 +73,28 @@ protected: // Create and configure function FunctionType col2im_func; - col2im_func.configure(&src, &dst, convolved_dims, num_groups); + col2im_func.configure(src.info(), dst.info(), convolved_dims, num_groups); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); + arm_compute::ITensorPack pack = + { + { arm_compute::TensorType::ACL_SRC, &src }, + { arm_compute::TensorType::ACL_DST, &dst } + }; // Compute function - col2im_func.run(); + col2im_func.run(pack); return dst; } diff --git a/tests/validation/fixtures/ComparisonFixture.h b/tests/validation/fixtures/ComparisonFixture.h index 43da0ae49c..f25d5abb73 100644 --- a/tests/validation/fixtures/ComparisonFixture.h +++ b/tests/validation/fixtures/ComparisonFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComparisonValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1) { _target = compute_target(op, shape0, shape1, data_type, qinfo0, qinfo1); @@ -71,18 +70,18 @@ protected: FunctionType comp_op; comp_op.configure(&ref_src1, &ref_src2, &dst, op); - ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(ref_src1), 0); @@ -117,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComparisonBroadcastValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type) { ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1, data_type, QuantizationInfo(), QuantizationInfo()); @@ -128,7 +126,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComparisonValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(ComparisonOperation op, const TensorShape &shape, DataType data_type) { ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape, shape, data_type, QuantizationInfo(), QuantizationInfo()); @@ -139,7 +136,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComparisonValidationQuantizedFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(ComparisonOperation op, const TensorShape &shape, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1) { @@ -151,7 +147,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComparisonQuantizedBroadcastValidationFixture : public ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1) { ComparisonValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1, data_type, qinfo0, 
qinfo1); diff --git a/tests/validation/fixtures/ComputeAllAnchorsFixture.h b/tests/validation/fixtures/ComputeAllAnchorsFixture.h index f385cb81f0..620f1b53fa 100644 --- a/tests/validation/fixtures/ComputeAllAnchorsFixture.h +++ b/tests/validation/fixtures/ComputeAllAnchorsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComputeAllAnchorsGenericFixture : public framework::Fixture { public: - template <typename...> void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo) { _target = compute_target(num_anchors, data_type, info, qinfo); @@ -69,13 +68,13 @@ protected: FunctionType compute_all_anchors; compute_all_anchors.configure(&anchors, &all_anchors, info); - ARM_COMPUTE_EXPECT(all_anchors.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(all_anchors.info()->is_resizable()); // Allocate tensors all_anchors.allocator()->allocate(); anchors.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!all_anchors.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!all_anchors.info()->is_resizable()); // Fill tensors fill(AccessorType(anchors)); @@ -107,7 +106,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComputeAllAnchorsFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type) { ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, QuantizationInfo()); @@ -118,7 +116,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ComputeAllAnchorsQuantizedFixture : public ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo) { ComputeAllAnchorsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(num_anchors, info, data_type, qinfo); diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h index d9615ff8b5..3a021661ac 100644 --- a/tests/validation/fixtures/ConcatenateLayerFixture.h +++ b/tests/validation/fixtures/ConcatenateLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -50,7 +50,6 @@ private: using SrcITensorType = typename std::conditional<CI, const ITensorType, ITensorType>::type; public: - template <typename...> void setup(TensorShape shape, DataType data_type, unsigned int axis) { // Create input shapes @@ -119,20 +118,20 @@ protected: for(auto &src : srcs) { - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); } - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors for(auto &src : srcs) { src.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); } dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors int i = 0; diff --git a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h index d7984830f5..7ad14e1b40 100644 --- a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h +++ b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ConvertFullyConnectedWeightsValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, unsigned int weights_w, DataLayout training_data_layout, DataType data_type) { const unsigned int height = input_shape.x() * input_shape.y() * input_shape.z(); @@ -61,7 +60,7 @@ protected: { case DataType::QASYMM8: { - std::uniform_int_distribution<uint8_t> distribution(0, 10); + std::uniform_int_distribution<uint32_t> distribution(0, 10); library->fill(tensor, distribution, i); break; } @@ -93,15 +92,15 @@ protected: convert_weights.configure(&src, &dst, input_shape, training_data_layout); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h index a4db49fc8e..0622e5e6f0 100644 --- a/tests/validation/fixtures/ConvolutionLayerFixture.h +++ b/tests/validation/fixtures/ConvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,12 +21,19 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE + +#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" +#include "arm_compute/graph/Utils.h" +#ifdef ARM_COMPUTE_OPENCL_ENABLED +#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h" +#endif // ARM_COMPUTE_OPENCL_ENABLED #include "arm_compute/runtime/NEON/NEScheduler.h" +#include "src/core/NEON/kernels/arm_gemm/utils.hpp" +#include "src/graph/mutators/MutatorUtils.h" #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" @@ -35,10 +42,12 @@ #include "tests/validation/Helpers.h" #include "tests/validation/reference/ActivationLayer.h" #include "tests/validation/reference/ConvolutionLayer.h" +#include "tests/validation/reference/PadLayer.h" #include "tests/validation/reference/Permute.h" #include "tests/validation/reference/Utils.h" #include <random> +#include <type_traits> namespace arm_compute { @@ -49,13 +58,30 @@ namespace validation namespace detail { template <typename ConvolutionFunction, typename TensorType> -void configure_conv_function(ConvolutionFunction &func, +#ifdef ARM_COMPUTE_OPENCL_ENABLED +std::enable_if_t<!std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void> +#else // ARM_COMPUTE_OPENCL_ENABLED +void +#endif // ARM_COMPUTE_OPENCL_ENABLED +configure_conv_function(ConvolutionFunction &func, + TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst, + const PadStrideInfo &info, const WeightsInfo &weights_info, + const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups) +{ + func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, false /* enable_fast_math */, num_groups); +} + +#ifdef ARM_COMPUTE_OPENCL_ENABLED +template <typename ConvolutionFunction, typename TensorType> +std::enable_if_t<std::is_same<ConvolutionFunction, CLGEMMConvolutionLayer>::value, void> +configure_conv_function(ConvolutionFunction &func, TensorType *src, const TensorType *weights, const TensorType *bias, TensorType *dst, const PadStrideInfo &info, const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups) { func.configure(src, weights, bias, dst, info, weights_info, dilation, act_info, num_groups); } +#endif // ARM_COMPUTE_OPENCL_ENABLED } // namespace detail template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW> @@ -66,26 +92,84 @@ public: || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type; + void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info, + QuantizationInfo &weights_q_info, DataType data_type) + { + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + _hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = 
distribution_t(generator); + + _quantization_info = QuantizationInfo(scale_lhs, offset_lhs); + _weight_quantization_info = QuantizationInfo(scale_rhs, offset_rhs); + + QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info, + weights_shape.y() /* height */, weights_shape.x() /* width */, input_shape.z() /* channels */, + data_type, 0.5f /* bias_fraction */); + + _dst_q_info = q_hint.q_info; + _min_bias = q_hint.bias_min; + _max_bias = q_hint.bias_max; + } + public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, - DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info) + DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, + bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false) { + // This hash is used by the random generators. There may be hash collisions, but + // this is intentional, as it is a very easy way to make the current + // random generation process differ across almost all test configurations, + // which previously used the same set of values. + _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] + + + weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] + + mixed_layout + (data_type == DataType::QASYMM8_SIGNED) + (data_layout == DataLayout::NHWC); + + _mixed_layout = mixed_layout; _data_type = data_type; _weights_data_type = weights_data_type; - _is_quantized = is_data_type_quantized_asymmetric(data_type); + const bool is_quantized = is_data_type_quantized(weights_data_type); _is_bfloat16 = data_type == DataType::BFLOAT16; - _bias_data_type = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type); + _bias_data_type = is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type); _output_data_type = _is_bfloat16 ? DataType::F32 : data_type; _quantization_info = quantization_info; _weight_quantization_info = weight_quantization_info; _data_layout = data_layout; + _dst_q_info = quantization_info; + + if(is_quantized && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY)) + { + setup_quantization(input_shape, weights_shape, _quantization_info, _weight_quantization_info, data_type); + _use_dynamic_output_quant = true; + } - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test multi-DataLayout graph cases, where the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ?
DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstate the original data layout so that the test suite can properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + void regularize_values(void *values, size_t size) { float *fvalues = static_cast<float *>(values); @@ -102,16 +186,34 @@ protected: { case DataType::QASYMM8: { - std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second); - library->fill(tensor, distribution, i); + if(_use_dynamic_output_quant) + { + std::uniform_int_distribution<int32_t> distribution(0, 255); + library->fill(tensor, distribution, i); + } + else + { + // Legacy initialization in case the output quantization info can't be reliably estimated + std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); + library->fill(tensor, distribution, i); + } break; } case DataType::QASYMM8_SIGNED: { - std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second); - library->fill(tensor, distribution, i); + if(_use_dynamic_output_quant) + { + std::uniform_int_distribution<int32_t> distribution(-128, 127); + library->fill(tensor, distribution, i); + } + else + { + // Legacy initialization in case the output quantization info can't be reliably estimated + std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second); + library->fill(tensor, distribution, i); + } break; } case DataType::QSYMM8_PER_CHANNEL: @@ -130,13 +232,13 @@ protected: max_bound = bounds.second; } } - std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound); + std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound); library->fill(tensor, distribution, i); break; } case DataType::S32: { - std::uniform_int_distribution<int32_t> distribution(-100, 100); + std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias); library->fill(tensor, distribution, i); break; } @@ -163,8 +265,9 @@ protected: } } + // The given input is in NCHW format TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info, - bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info) + bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false) { ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0); @@ -175,6 +278,18 @@ protected: permute(input_shape, PermutationVector(2U, 0U, 1U)); permute(weights_shape, PermutationVector(2U, 0U, 1U)); permute(output_shape, PermutationVector(2U, 0U, 1U)); + + if(pre_pad_layer.size() > 0) + { + // make sure padding values exist for each of the C, H, W dimensions + for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i) + { + pre_pad_layer.push_back({ 0, 0 }); + } + + // rotate the padding info from NCHW to NHWC + std::rotate(pre_pad_layer.begin(),
pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3); + } } const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); @@ -186,17 +301,47 @@ protected: // Create tensors TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout); TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout); - TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout); - TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout); + TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, _dst_q_info, _data_layout); // Create and configure function FunctionType conv; - detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT); + const unsigned int width_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH); + + const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0); + const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0); + + if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer)) + { + // this is the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution + const PadStrideInfo new_conv_info( + info.stride().first, + info.stride().second, + info.pad_left() + pad_w.first, + info.pad_right() + pad_w.second, + info.pad_top() + pad_h.first, + info.pad_bottom() + pad_h.second, + info.round()); + detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups); + } + else + { + detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups); + } + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + // Test "add padding after configure" behavior. 
This behavior should not affect the correctness + add_padding_x({ &src, &bias, &dst }, _data_layout); + // Padding weights may affect code path in some backends + if (padded_weights) + { + add_padding_x({ &weights }, _data_layout); + } // Allocate tensors src.allocator()->allocate(); @@ -204,24 +349,31 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors - fill(AccessorType(src), 0); - fill(AccessorType(weights), 1); - fill(AccessorType(bias), 2); + fill(AccessorType(src), 0 + _hash); + fill(AccessorType(weights), 1 + _hash); + fill(AccessorType(bias), 2 + _hash); - // Compute NEConvolutionLayer function - conv.run(); + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - const Size2D &dilation, const ActivationLayerInfo act_info) + const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({})) { ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0); @@ -237,9 +389,9 @@ protected: SimpleTensor<TW> weights{ weights_shape, weights_dt, 1, _weight_quantization_info }; SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info }; - fill(src, 0); - fill(weights, 1); - fill(bias, 2); + fill(src, 0 + _hash); + fill(weights, 1 + _hash); + fill(bias, 2 + _hash); // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output if(_is_bfloat16) @@ -248,9 +400,14 @@ protected: regularize_values(static_cast<void *>(weights.data()), weights.num_elements()); } - return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups), + if(pre_pad_layer.size() > 0) + { + src = reference::pad_layer<T>(src, pre_pad_layer, PixelValue(0), PaddingMode::CONSTANT); + } + + return (act_info.enabled()) ? 
reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info), act_info) : - reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups); + reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups, _dst_q_info); } TensorType _target{}; @@ -262,34 +419,63 @@ protected: DataLayout _data_layout{}; QuantizationInfo _quantization_info{}; QuantizationInfo _weight_quantization_info{}; - bool _is_quantized = false; + QuantizationInfo _dst_q_info{}; bool _is_bfloat16 = false; + bool _mixed_layout = false; + bool _use_dynamic_output_quant{false}; + int32_t _hash{0}; + int32_t _min_bias{-100}; + int32_t _max_bias{100}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_type, data_layout, - QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> +class ConvolutionValidationPaddedWeightsFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> +{ +public: + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, + DataLayout data_layout) + { + ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, + data_type, data_type, data_layout, + QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), mixed_layout, PaddingList({}), true); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> +class ConvolutionValidationWithPaddingFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> +{ +public: + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, + DataLayout data_layout, ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({})) + { + ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, + data_type, data_type, data_layout, + QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout, pre_pad_layer); + } +}; + +template <typename TensorType, typename 
AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, - data_type, data_type, data_layout, quantization_info, quantization_info, act_info); + data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout); } }; @@ -297,7 +483,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type) { @@ -313,7 +498,311 @@ public: quantization_info, QuantizationInfo(weights_scales), act_info); } }; + + +#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS +inline TensorInfo prepare_weights(const TensorInfo tensor_info, const arm_compute::WeightFormat weight_format) +{ + const DataLayout data_layout = tensor_info.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS); + const DataType data_type = tensor_info.data_type(); + const TensorShape tensor_shape = tensor_info.tensor_shape(); + const int N = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O + const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + const int C = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I + + const int interleave_by = arm_compute::interleave_by(weight_format); + const int block_by = arm_compute::block_by(weight_format); + const int Ip = arm_gemm::roundup<unsigned int>(C, block_by); // C'=I' + const int Op = arm_gemm::roundup<unsigned int>(N, interleave_by); // O'=N' + + arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes(); + strides_in_bytes.set(1, Ip * interleave_by * H * W * tensor_info.element_size()); + strides_in_bytes.set(2, Ip * Op * tensor_info.element_size()); + + const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes(); + + // Total size needs to include padded dimensions + const size_t total_size_in_bytes = Op * H * W * Ip * tensor_info.element_size(); + + const TensorShape TS(Ip, W, H, Op); + + TensorInfo new_tensor_info = tensor_info; + new_tensor_info.init(TS, 1 /*num_channels, deprecated*/, data_type, strides_in_bytes, + offset_first_element_in_bytes, total_size_in_bytes); + return new_tensor_info; +} + +template <typename ScalarType, typename AccessorType> +inline void rearrange_data(const AccessorType 
src, AccessorType dst, const arm_compute::WeightFormat weight_format) +{ + ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(weight_format), framework::LogLevel::ERRORS); + // Data Layout: OHWIo<interleave_by>i<block_by> + const int interleave_by = arm_compute::interleave_by(weight_format); + const int block_by = arm_compute::block_by(weight_format); + const TensorShape src_tensor_shape = src.shape(); + const DataLayout data_layout = src.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NHWC, framework::LogLevel::ERRORS); + const unsigned int O = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O + const unsigned int H = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const unsigned int W = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + const unsigned int I = src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I + const unsigned int Ip = arm_gemm::roundup<unsigned int>(I, block_by); // C'=I' + const unsigned int Op = arm_gemm::roundup<unsigned int>(O, interleave_by); // N'=O' + + ARM_COMPUTE_EXPECT_EQUAL(Op * H * W * Ip, (unsigned)dst.num_elements(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS); + + const ScalarType *src_ptr = reinterpret_cast<const ScalarType *>(src.data()); + ScalarType *dst_ptr = reinterpret_cast<ScalarType *>(dst.data()); + for(unsigned i = 0; i < I; ++i) + for(unsigned w = 0; w < W; ++w) + for(unsigned h = 0; h < H; ++h) + for(unsigned o = 0; o < O; ++o) + { + ScalarType src_element; + switch(data_layout) + { + case DataLayout::NHWC: + { + src_element = src_ptr[o * H * W * I + h * W * I + w * I + i]; + } + break; + default: + { + ARM_COMPUTE_ERROR("Unsupported memory layout."); + } + } + const int x5 = std::floor(((float)o) / interleave_by); + const int x4 = h; + const int x3 = w; + const int x2 = std::floor((float)i / block_by); + const int x1 = o % interleave_by; + const int x0 = i % block_by; + unsigned dst_idx = x5 * H * W * Ip * interleave_by + + x4 * W * Ip * interleave_by + + x3 * Ip * interleave_by + + x2 * interleave_by * block_by + + x1 * block_by + + x0; + dst_ptr[dst_idx] = src_element; + } +} + +template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math> +class VariableWeightsFixtureBaseClass : public framework::Fixture +{ +public: + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout, + const DataType data_type) + { + conv = std::make_unique<ConvolutionFunction>(); + // prepare data + _data_layout = data_layout; + // Fixed format kernels for variable weights can work only with NHWC format. 
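This NHWC-only restriction matches the OHWI ordering that the blocked OHWIo<interleave_by>i<block_by> layout in rearrange_data above assumes, and the dst_idx arithmetic there can be checked by hand. A minimal standalone sketch, using hypothetical sizes rather than values from any test dataset (interleave_by = 4, block_by = 1, H = W = 1, O = 6 padded to Op = 8, I = Ip = 3):

#include <cassert>

// The dst_idx arithmetic from rearrange_data, specialised to the fixed, illustrative sizes above.
int blocked_index(int o, int h, int w, int i)
{
    constexpr int interleave_by = 4, block_by = 1, H = 1, W = 1, Ip = 3;
    return (o / interleave_by) * H * W * Ip * interleave_by // x5: interleave group of output channels
           + h * W * Ip * interleave_by                     // x4
           + w * Ip * interleave_by                         // x3
           + (i / block_by) * interleave_by * block_by      // x2: block of input channels
           + (o % interleave_by) * block_by                 // x1: output offset within the group
           + (i % block_by);                                // x0: input offset within the block
}

int main()
{
    assert(blocked_index(5, 0, 0, 2) == 21); // 12 + 8 + 1, inside the Op * H * W * Ip = 24 padded buffer
    return 0;
}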
+ ARM_COMPUTE_EXPECT_EQUAL(_data_layout, DataLayout::NHWC, framework::LogLevel::ERRORS); + _data_type = data_type; + // run the code + compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation); + compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation); + } + void teardown() + { + _target.allocator()->free(); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + +private: + virtual void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info, + const PadStrideInfo &conv_info, + const Size2D &dilation) = 0; + + void compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &conv_info, + const Size2D &dilation) + { + // The dataset is always in NCHW format - we need to make C the + // innermost dimension because the fixed-format kernels work only + // with NHWC layout. + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + permute(output_shape, PermutationVector(2U, 0U, 1U)); + const auto src_tensor_info = TensorInfo(input_shape, 1, _data_type, _data_layout); + const auto weight_tensor_info = TensorInfo(weights_shape, 1, _data_type, _data_layout); + const auto bias_tensor_info = TensorInfo(bias_shape, 1, _data_type, _data_layout); + auto dst_tensor_info = TensorInfo(output_shape, 1, _data_type, _data_layout); + + const int kernel_height = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT)]; + const int kernel_width = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)]; + const int num_kernels = weights_shape[get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES)]; + + const WeightsInfo query_weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY); + const bool kernel_found = bool(ConvolutionFunction::has_opt_impl(_computed_weight_format, &src_tensor_info, &weight_tensor_info, + &bias_tensor_info, &dst_tensor_info, conv_info, query_weights_info)); + // Make sure that the setup finds a fixed-format kernel as requested by the test case.
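Read together with the query just above: the fixture first asks the backend, via WeightFormat::ANY, to name the blocked weight format its best kernel expects, and only then builds the real WeightsInfo around the answer. Condensed, and reusing the names from the code above (an illustrative sketch, not a drop-in replacement):

// Ask the backend for its preferred fixed weight format; on success,
// computed_weight_format names a concrete OHWIo<interleave_by>i<block_by> variant.
arm_compute::WeightFormat computed_weight_format = arm_compute::WeightFormat::UNSPECIFIED;
const WeightsInfo query(false /* reshape_weights */, kernel_width, kernel_height, num_kernels, false, arm_compute::WeightFormat::ANY);
if(bool(ConvolutionFunction::has_opt_impl(computed_weight_format, &src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, query)))
{
    // Rearrange the weight data into computed_weight_format (see rearrange_data above)
    // and pass a WeightsInfo carrying that format to configure().
}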
+ ARM_COMPUTE_EXPECT(kernel_found, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format(_computed_weight_format), framework::LogLevel::ERRORS); + + const WeightsInfo weights_info(/*reshape_weights*/ false, kernel_width, kernel_height, num_kernels, false, _computed_weight_format); + configure_and_execute_kernel(src_tensor_info, weight_tensor_info, bias_tensor_info, dst_tensor_info, weights_info, conv_info, + dilation); + } + void compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, + const Size2D &dilation) + { + ARM_COMPUTE_UNUSED(input_shape, weights_shape, bias_shape, output_shape, info, + dilation); + + // Create reference + SimpleTensor<ScalarType> src{ input_shape, _data_type }; + SimpleTensor<ScalarType> weights{ weights_shape, _data_type }; + SimpleTensor<ScalarType> bias{ bias_shape, _data_type }; + fill(src, 0); + fill(bias, 1); + fill(weights, 3); + _reference = reference::convolution_layer<ScalarType>(src, weights, bias, output_shape, info, dilation, 1 /*num_groups*/); + } + DataLayout _data_layout{}; + DataType _data_type{}; + +protected: + std::unique_ptr<ConvolutionFunction> conv{}; + arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED }; + TensorClass _target{}; + SimpleTensor<ScalarType> _reference{}; +}; + +template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math> +class VariableWeightsFixture : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math> +{ + void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info, + const PadStrideInfo &conv_info, + const Size2D &dilation) + { + this->conv->configure(&src_tensor_info, &weight_tensor_info, &bias_tensor_info, &dst_tensor_info, conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math); + + // Allocate input tensors + auto src = create_tensor<TensorClass>(src_tensor_info); + auto weights_original = create_tensor<TensorClass>(weight_tensor_info); + const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format); + auto weights_transformed = create_tensor<TensorClass>(new_tensor_info); + auto bias = create_tensor<TensorClass>(bias_tensor_info); + src.allocator()->allocate(); + weights_original.allocator()->allocate(); + weights_transformed.allocator()->allocate(); + bias.allocator()->allocate(); + // Allocate destination tensor + this->_target = create_tensor<TensorClass>(dst_tensor_info); + this->_target.allocator()->allocate(); + + // Prepare source and biases that are left unchanged. 
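// The weights, by contrast, are refilled between the two run() calls below: the
// double run checks that a fixed-format kernel can consume new weight values,
// rearranged into _computed_weight_format, without a reconfigure(); this is what
// the "variable weights" in these fixtures' names refers to.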
+ this->fill(AccessorType(src), 0); + this->fill(AccessorType(bias), 1); + + // First run + this->fill(AccessorType(weights_original), 2); + rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format); + ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weights_transformed }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &(this->_target) } }; + this->conv->run(run_pack); + // Second run, with new weights + this->fill(AccessorType(weights_original), 3); + rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format); + this->conv->run(run_pack); + src.allocator()->free(); + weights_original.allocator()->free(); + weights_transformed.allocator()->free(); + bias.allocator()->free(); + } +}; + +template <typename ConvolutionFunction, typename TensorClass, typename AccessorType, typename ScalarType, bool enable_fast_math> +class VariableWeightsFixtureNEInterface : public VariableWeightsFixtureBaseClass<ConvolutionFunction, TensorClass, AccessorType, ScalarType, enable_fast_math> +{ + void configure_and_execute_kernel(TensorInfo src_tensor_info, TensorInfo weight_tensor_info, TensorInfo bias_tensor_info, TensorInfo dst_tensor_info, const WeightsInfo weights_info, + const PadStrideInfo &conv_info, + const Size2D &dilation) + { + // Allocate input tensors + auto src = create_tensor<TensorClass>(src_tensor_info); + auto weights_original = create_tensor<TensorClass>(weight_tensor_info); + const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info, this->_computed_weight_format); + auto weights_transformed = create_tensor<TensorClass>(new_tensor_info); + auto bias = create_tensor<TensorClass>(bias_tensor_info); + src.allocator()->allocate(); + weights_original.allocator()->allocate(); + weights_transformed.allocator()->allocate(); + bias.allocator()->allocate(); + // Allocate destination tensor + this->_target = create_tensor<TensorClass>(dst_tensor_info); + this->_target.allocator()->allocate(); + this->conv->configure(&src, &weights_transformed, &bias, &(this->_target), conv_info, weights_info, dilation, ActivationLayerInfo(), enable_fast_math); + // Prepare source and biases that are left unchanged. + this->fill(AccessorType(src), 0); + this->fill(AccessorType(bias), 1); + + // First run + this->fill(AccessorType(weights_original), 2); + rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format); + this->conv->run(); + // Second run, with new weights + this->fill(AccessorType(weights_original), 3); + rearrange_data<ScalarType, AccessorType>(AccessorType(weights_original), AccessorType(weights_transformed), this->_computed_weight_format); + this->conv->run(); + src.allocator()->free(); + weights_original.allocator()->free(); + weights_transformed.allocator()->free(); + bias.allocator()->free(); + } +}; + +template <typename ConvolutionClass, bool enable_fast_math> +class HasOptImplFixture : public framework::Fixture +{ +public: + void setup(DataType data_type, arm_compute::WeightFormat query_weight_format) + { + auto conv = std::make_unique<ConvolutionClass>(); + const auto src_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC); + const auto weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? 
DataType::BFLOAT16 : data_type, DataLayout::NHWC); + const auto bias_info = TensorInfo(TensorShape(64U), 1, data_type, DataLayout::NHWC); + auto dst_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC); + const auto conv_info = PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR); + const WeightsInfo weights_info(false, 3U, 3U, 64U, false, query_weight_format); + _kernel_found = bool(ConvolutionClass::has_opt_impl(_computed_weight_format, &src_info, &weight_info, + &bias_info, &dst_info, conv_info, weights_info, + Size2D(1U, 1U) /*dilation*/, ActivationLayerInfo() /*act_info*/, enable_fast_math)); + } + +protected: + bool _kernel_found{ false }; + arm_compute::WeightFormat _computed_weight_format{ arm_compute::WeightFormat::UNSPECIFIED }; +}; +#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS + } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */ + +#endif // ACL_TESTS_VALIDATION_FIXTURES_CONVOLUTIONLAYERFIXTURE_H diff --git a/tests/validation/fixtures/CopyFixture.h b/tests/validation/fixtures/CopyFixture.h index feb1d7dfb5..f5e711a500 100644 --- a/tests/validation/fixtures/CopyFixture.h +++ b/tests/validation/fixtures/CopyFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class CopyFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type) { _target = compute_target(input_shape, output_shape, data_type); @@ -61,7 +60,7 @@ protected: TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type) { // Check if indeed the input shape can be reshape to the output one - ARM_COMPUTE_EXPECT(input_shape.total_size() == output_shape.total_size(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size()); // Create tensors TensorType src = create_tensor<TensorType>(input_shape, data_type); @@ -72,15 +71,15 @@ protected: copy.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/CropResizeFixture.h b/tests/validation/fixtures/CropResizeFixture.h index 4f6389155a..30a3fd8569 100644 --- a/tests/validation/fixtures/CropResizeFixture.h +++ b/tests/validation/fixtures/CropResizeFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,6 @@ #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" -#include "tests/RawLutAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" #include "tests/validation/Helpers.h" @@ -47,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class CropResizeFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape src_shape, TensorShape boxes_shape, Coordinates2D crop_size, InterpolationPolicy method, float extrapolation_value, bool is_outside_bounds, DataType data_type) { @@ -88,15 +86,15 @@ protected: FunctionType crop; crop.configure(&src, &boxes, &boxes_ind, &dst, crop_size, method, extrapolation_value); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h index 6ea2335ae9..83170c413c 100644 --- a/tests/validation/fixtures/DeconvolutionLayerFixture.h +++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -42,22 +42,24 @@ namespace validation { using namespace arm_compute::misc::shape_calculator; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW> class DeconvolutionLayerFixtureBase : public framework::Fixture { public: using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type; public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, - DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias) + DataType data_type, DataType weights_data_type, DataLayout data_layout, + QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias) { - _data_type = data_type; - _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - _data_layout = data_layout; - _input_quantization_info = input_quantization_info; - _output_quantization_info = output_quantization_info; + _data_type = data_type; + _weights_data_type = weights_data_type; + _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; + _data_layout = data_layout; + _input_quantization_info = input_quantization_info; + _output_quantization_info = output_quantization_info; + _weights_quantization_info = weights_quantization_info; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias); @@ -72,14 +74,34 @@ protected: case DataType::QASYMM8: { std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, i); break; } case DataType::QASYMM8_SIGNED: { std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second); + library->fill(tensor, distribution, i); + break; + } + case DataType::QSYMM8_PER_CHANNEL: + { + int min_bound = 128; + int max_bound = -127; + for(size_t i = 0; i < _input_quantization_info.scale().size(); i++) + { + std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f); + if(bounds.first < min_bound) + { + min_bound = bounds.first; + } + if(bounds.second > max_bound) + { + max_bound = bounds.second; + } + } + std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound); library->fill(tensor, distribution, i); break; } @@ -113,8 +135,7 @@ protected: { case DataType::S32: { - const int32_t value = static_cast<int32_t>(tensor.quantization_info().uniform().offset); - library->fill_tensor_value(tensor, value); + library->fill_tensor_value(tensor, 0); break; } case DataType::F16: @@ -140,7 +161,7 @@ protected: // Create tensors TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _input_quantization_info, _data_layout); - TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _input_quantization_info, _data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout); TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _input_quantization_info, _data_layout); TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _output_quantization_info, _data_layout); @@ -148,13 +169,13 @@ protected: FunctionType conv; conv.configure(&src, &weights, add_bias ? 
&bias : nullptr, &dst, info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); if(add_bias) { - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); } - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -165,13 +186,13 @@ protected: } dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); if(add_bias) { - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); } - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); @@ -183,7 +204,6 @@ protected: // Compute DeconvolutionLayer function conv.run(); - return dst; } @@ -192,7 +212,7 @@ protected: { // Create reference SimpleTensor<T> src{ input_shape, _data_type, 1, _input_quantization_info }; - SimpleTensor<T> weights{ weights_shape, _data_type, 1, _input_quantization_info }; + SimpleTensor<TW> weights{ weights_shape, _weights_data_type, 1, _weights_quantization_info }; SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _input_quantization_info }; // Fill reference @@ -207,28 +227,27 @@ protected: { fill_zeros(bias); } - - return reference::deconvolution_layer<T>(src, weights, bias, output_shape, info, _output_quantization_info); + return reference::deconvolution_layer<T, TW>(src, weights, bias, output_shape, info, _output_quantization_info); } TensorType _target{}; SimpleTensor<T> _reference{}; DataType _data_type{}; + DataType _weights_data_type{}; DataType _bias_data_type{}; DataLayout _data_layout{}; QuantizationInfo _input_quantization_info{}; QuantizationInfo _output_quantization_info{}; + QuantizationInfo _weights_quantization_info{}; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> -class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T> +class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias) { - ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); @@ -236,20 +255,18 @@ public: TensorInfo input_info(input_shape, 1, data_type); TensorInfo weights_info(weights_shape, 1, data_type); TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); - 
DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(), - QuantizationInfo(), add_bias); + DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(), + QuantizationInfo(), QuantizationInfo(), add_bias); } }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> -class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T> +class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top, unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias) { - ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL); @@ -257,20 +274,18 @@ public: TensorInfo input_info(input_shape, 1, data_type); TensorInfo weights_info(weights_shape, 1, data_type); TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); - DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(), - QuantizationInfo(), add_bias); + DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, QuantizationInfo(), + QuantizationInfo(), QuantizationInfo(), add_bias); } }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y> -class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T> +class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias) { - ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported"); const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); @@ -278,8 +293,38 @@ public: TensorInfo input_info(input_shape, 1, data_type, input_quantization_info); TensorInfo weights_info(weights_shape, 1, data_type, input_quantization_info); TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); - DeconvolutionLayerFixtureBase<TensorType, 
AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, input_quantization_info, - output_quantization_info, add_bias); + DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_type, data_layout, + input_quantization_info, + output_quantization_info, input_quantization_info, add_bias); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, unsigned int kernel_size_x, unsigned int kernel_size_y> +class DeconvolutionValidationQuantizedPerChannelFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW> +{ +public: + void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady, + unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias, + DataType weights_data_type) + { + const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels); + const TensorShape bias_shape(num_kernels); + const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL); + auto out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info); + TensorInfo input_info(input_shape, 1, data_type, input_quantization_info); + TensorInfo weights_info(weights_shape, 1, weights_data_type, input_quantization_info); + TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info); + + std::vector<float> weights_scales{}; + std::mt19937 gen(library->seed()); + std::uniform_real_distribution<float> dis(0.01f, 1.f); + for(size_t i = 0; i < output_shape[2]; ++i) + { + weights_scales.push_back(dis(gen)); + } + DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, weights_data_type, data_layout, + input_quantization_info, + output_quantization_info, QuantizationInfo(weights_scales), add_bias); } }; diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h index 937a1a06a9..f55d20bf3e 100644 --- a/tests/validation/fixtures/DepthConvertLayerFixture.h +++ b/tests/validation/fixtures/DepthConvertLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DepthConvertLayerValidationBaseFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info) { _shift = shift; @@ -61,13 +60,13 @@ protected: if(is_data_type_quantized(tensor.data_type())) { std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, i); } else { - // When converting S32 to F16, both reference and Neon implementations are + or - infinity outside the F16 range. 
+ // When converting S32 to F16, both reference and Compute Library implementations are + or - infinity outside the F16 range. if(dt_in == DataType::S32 && dt_out == DataType::F16) { std::uniform_int_distribution<int32_t> distribution_s32(-65504, 65504); @@ -90,15 +89,15 @@ protected: FunctionType depth_convert; depth_convert.configure(&src, &dst, policy, shift); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, dt_in, dt_out); @@ -130,7 +129,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DepthConvertLayerValidationFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift) { DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, @@ -142,7 +140,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DepthConvertLayerValidationQuantizedFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info) { DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy, diff --git a/tests/validation/fixtures/DepthToSpaceLayerFixture.h b/tests/validation/fixtures/DepthToSpaceLayerFixture.h index a254ba4322..abe3d8b22f 100644 --- a/tests/validation/fixtures/DepthToSpaceLayerFixture.h +++ b/tests/validation/fixtures/DepthToSpaceLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -39,7 +39,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DepthToSpaceLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, int32_t block_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout) { _target = compute_target(input_shape, block_shape, output_shape, data_type, data_layout); @@ -73,15 +72,15 @@ protected: FunctionType depth_to_space; depth_to_space.configure(&input, &output, block_shape); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h index d9806b5c84..6e2e3a3846 100644 --- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
-#ifndef ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE
-#define ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -38,6 +38,7 @@
#include "utils/Utils.h"
+#include <cstdint>
#include <random>
namespace arm_compute
@@ -54,35 +55,83 @@ class DepthwiseConvolutionLayerValidationGenericFixture : public framework::Fixt
public:
using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type;
+ void setup_quantization(TensorShape input_shape, TensorShape weights_shape, QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info, DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(input_shape);
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ _input_quantization_info = QuantizationInfo(scale_lhs, offset_lhs);
+ _weights_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info,
+ weights_shape.y() /* height */, weights_shape.x() /* width */, 1 /* channels */,
+ data_type, 0.5f /* bias_fraction */);
+
+ _output_quantization_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
+
public:
- template <typename...>
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false, bool in_place = false, bool run_twice = false)
{
+ ARM_COMPUTE_ERROR_ON(mixed_layout && in_place);
+ // This hash is used by the random generators. There may be hash collisions, but
+ // that is intentional: it is a very easy way to make the random generation
+ // process differ for most test configurations, which previously all used the
+ // same set of values.
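+ // For example, two configurations that differ only in one padding value get
+ // different hash offsets, so the fills below (seeded with 0 + _hash, 1 + _hash,
+ // and so on) draw different random values instead of the identical 0/1/2
+ // seed sequence used before.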
+ _hash = in_shape[0] + in_shape[1] + in_shape[2] + in_shape[3] + + kernel_size.width + kernel_size.height + dilation.x() + + dilation.y() + pad_stride_info.pad_bottom() + pad_stride_info.pad_left() + pad_stride_info.pad_right() + pad_stride_info.pad_top(); + + _mixed_layout = mixed_layout; _input_shape = in_shape; _input_data_type = input_data_type; _weights_data_type = weights_data_type; - _input_quantization_info = input_quantization_info; - _weights_quantization_info = weights_quantization_info; - _output_quantization_info = output_quantization_info; _data_layout = data_layout; _pad_stride_info = pad_stride_info; _act_info = act_info; _depth_multiplier = depth_multiplier; _dilation = dilation; + _in_place = in_place; + _run_twice = run_twice; _bias_data_type = is_data_type_quantized(_input_data_type) ? DataType::S32 : _input_data_type; _weights_shape = TensorShape(kernel_size.width, kernel_size.height); - const TensorInfo in_info(_input_shape, 1, _input_data_type); - const TensorInfo we_info(_weights_shape, 1, _weights_data_type); - _output_shape = compute_depthwise_convolution_shape(in_info, we_info, _pad_stride_info, _depth_multiplier, _dilation); + const TensorInfo in_info(_input_shape, 1, _input_data_type); + const TensorInfo we_info(_weights_shape, 1, _weights_data_type); + const ConvolutionInfo info{ _pad_stride_info, _depth_multiplier, _act_info, _dilation }; + _output_shape = compute_depthwise_convolution_shape(in_info, we_info, info); _weights_shape.set(2, _output_shape.z()); _biases_shape = TensorShape(_weights_shape[2]); + + _input_quantization_info = input_quantization_info; + _weights_quantization_info = weights_quantization_info; + _output_quantization_info = output_quantization_info; + + if(is_data_type_quantized(_input_data_type) && !is_data_type_quantized_symmetric(weights_data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY)) + { + setup_quantization(in_shape, _weights_shape, _input_quantization_info, _weights_quantization_info, _input_data_type); + _use_dynamic_output_quant = true; + } } void configure_target() @@ -99,18 +148,33 @@ public: } // Create tensors - _src = create_tensor<TensorType>(input_shape, _input_data_type, 1, _input_quantization_info, _data_layout); - _weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout); - _biases = create_tensor<TensorType>(_biases_shape, _bias_data_type, 1, _input_quantization_info, _data_layout); - _target = create_tensor<TensorType>(output_shape, _input_data_type, 1, _output_quantization_info, _data_layout); + _src = create_tensor<TensorType>(input_shape, _input_data_type, 1, _input_quantization_info, _data_layout); + _weights = create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout); + if(_run_twice) { + _weights.info()->set_are_values_constant(false); + } + _biases = create_tensor<TensorType>(_biases_shape, _bias_data_type, 1, _input_quantization_info, _data_layout); + TensorType *target_to_use = nullptr; + if(!_in_place) + { + _target = create_tensor<TensorType>(output_shape, _input_data_type, 1, _output_quantization_info, _data_layout); + target_to_use = &_target; + } + + add_padding_x({ &_src, &_biases }, _data_layout); + add_padding_x({ &_weights }, _data_layout, true); + if(!_in_place) + { + add_padding_x({ &_target }, _data_layout); + } // Create Depthwise Convolution configure function - _dwc.configure(&_src, &_weights, &_biases, &_target, _pad_stride_info, 
_depth_multiplier, _act_info, _dilation); + _dwc.configure(&_src, &_weights, &_biases, target_to_use, _pad_stride_info, _depth_multiplier, _act_info, _dilation); - ARM_COMPUTE_EXPECT(_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_target.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(_src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_biases.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_target.info()->is_resizable()); } void allocate_and_run_target() @@ -119,20 +183,41 @@ public: _src.allocator()->allocate(); _weights.allocator()->allocate(); _biases.allocator()->allocate(); - _target.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_target.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!_src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable()); + + if(!_in_place) + { + _target.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!_target.info()->is_resizable()); + } // Fill tensors - fill(AccessorType(_src), 0); - fill(AccessorType(_weights), 1); - fill(AccessorType(_biases), 2); + fill(AccessorType(_src), 0 + _hash); + fill(AccessorType(_weights), 1 + _hash); + fill(AccessorType(_biases), 2 + _hash); + + // Run with variable input + if(_run_twice) { + _dwc.run(); + + // Fill tensors with a new seed + fill(AccessorType(_src), 3 + _hash); + fill(AccessorType(_weights), 4 + _hash); + fill(AccessorType(_biases), 5 + _hash); + } - // Compute function - _dwc.run(); + if(_mixed_layout) + { + mix_layout(_dwc, _src, _target); + } + else + { + // Compute function + _dwc.run(); + } } void compute_reference() @@ -141,15 +226,41 @@ public: SimpleTensor<TW> weights{ _weights_shape, _weights_data_type, 1, _weights_quantization_info }; SimpleTensor<TBias> biases{ _biases_shape, _bias_data_type, 1, _input_quantization_info }; - fill(src, 0); - fill(weights, 1); - fill(biases, 2); + fill(src, 0 + _hash); + fill(weights, 1 + _hash); + fill(biases, 2 + _hash); + + if(_run_twice) { + SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info); + if(_act_info.enabled()) { + reference::activation_layer<T>(depth_out, _act_info); + } + + fill(src, 3 + _hash); + fill(weights, 4 + _hash); + fill(biases, 5 + _hash); + } SimpleTensor<T> depth_out = reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier, _dilation, _output_quantization_info); _reference = (_act_info.enabled()) ? reference::activation_layer<T>(depth_out, _act_info) : depth_out; } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + ARM_COMPUTE_ERROR_ON(_in_place); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Reinstating original data layout for the test suite to properly check the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -157,32 +268,77 @@ protected:
{
case DataType::QASYMM8:
{
- std::uniform_int_distribution<uint8_t> distribution(0, 10);
- library->fill(tensor, distribution, i);
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
break;
}
case DataType::QASYMM8_SIGNED:
+ {
+ if(_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ }
case DataType::QSYMM8_PER_CHANNEL:
{
- std::uniform_int_distribution<int8_t> distribution(-10, 10);
+ int min_bound = 128;
+ int max_bound = -127;
+ for(size_t i = 0; i < _weights_quantization_info.scale().size(); i++)
+ {
+ std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
+ if(bounds.first < min_bound)
+ {
+ min_bound = bounds.first;
+ }
+ if(bounds.second > max_bound)
+ {
+ max_bound = bounds.second;
+ }
+ }
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
library->fill(tensor, distribution, i);
break;
}
- case DataType::F16:
+ case DataType::S32:
{
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
library->fill(tensor, distribution, i);
break;
}
- case DataType::F32:
+ case DataType::BFLOAT16:
{
- std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
- case DataType::S32:
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
library->fill(tensor, distribution, i);
break;
}
@@ -214,19 +370,33 @@ protected:
ActivationLayerInfo _act_info{};
unsigned int _depth_multiplier{};
Size2D _dilation{};
+ bool _mixed_layout{ false };
+ bool _in_place{ false };
+ bool _run_twice{ false };
+ bool _use_dynamic_output_quant{false};
+
+ int32_t _hash{0};
+ // Random initialization limits
+ // Default values are the previously handcrafted limits
+ // that should be used when we don't use dynamic quantization
+ int32_t _min_bias{-100};
+ int32_t _max_bias{100};
+ int32_t _min_u8{0};
+ int32_t _max_u8{50};
+
int32_t _min_s8{-25}; + int32_t _max_s8{25}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false, bool run_twice = false> class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), - data_layout, act_info); + data_layout, act_info, mixed_layout, in_place, run_twice); } }; @@ -234,7 +404,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DepthwiseConvolutionLayerNativeValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type, DataLayout data_layout) { @@ -249,7 +418,7 @@ public: if(padding_valid) { - _conv_info = PadStrideInfo(); + _conv_info = PadStrideInfo(stride.width, stride.height); } else { @@ -274,13 +443,20 @@ public: _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout); _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout); - // Create Depthwise Convolution configure function - _dwc.configure(&_src, &_weights, &_biases, &_target, _conv_info, _depth_multiplier, _dilation); + add_padding_x({ &_src, &_biases, &_target }, _data_layout); + add_padding_x({ &_weights }, _data_layout, true); - ARM_COMPUTE_EXPECT(_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_target.info()->is_resizable(), framework::LogLevel::ERRORS); + // Create Depthwise Convolution configure function + const ConvolutionInfo info + { + _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation + }; + _dwc.configure(_src.info(), _weights.info(), _biases.info(), _target.info(), info); + + ARM_COMPUTE_ASSERT(_src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_biases.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_target.info()->is_resizable()); } void allocate_and_run_target() @@ -291,18 +467,24 @@ public: _biases.allocator()->allocate(); _target.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_target.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!_src.info()->is_resizable()); + 
ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_target.info()->is_resizable()); // Fill tensors fill(AccessorType(_src), 0); fill(AccessorType(_weights), 1); fill(AccessorType(_biases), 2); + arm_compute::ITensorPack pack; + pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_0, &_src); + pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights); + pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases); + pack.add_tensor(arm_compute::TensorType::ACL_DST, &_target); + // Compute function - _dwc.run(); + _dwc.run(pack); } void compute_reference() @@ -315,9 +497,9 @@ public: fill(weights, 1); fill(biases, 2); - const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), _conv_info, - _depth_multiplier, _dilation); - _reference = reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation); + const ConvolutionInfo info{ _conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation }; + const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info); + _reference = reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation); } protected: @@ -355,20 +537,21 @@ protected: unsigned int _depth_multiplier{}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool in_place = false> class DepthwiseConvolutionLayerNativeConfigurableValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(size_t width, size_t height, size_t channel, size_t batch, Size2D kernel_size, size_t depth_multiplier, Size2D dilation, Size2D stride, bool padding_valid, DataType data_type, - DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0) + DataLayout data_layout, const ActivationLayerInfo &act_info, unsigned int n0, bool export_to_cl_image) { - _dilation = dilation; - _depth_multiplier = depth_multiplier; - _data_type = data_type; - _data_layout = data_layout; - _act_info = act_info; - _n0 = n0; + _dilation = dilation; + _depth_multiplier = depth_multiplier; + _data_type = data_type; + _data_layout = data_layout; + _act_info = act_info; + _n0 = n0; + _export_to_cl_image = export_to_cl_image; + _in_place = in_place; _input_shape = TensorShape(width, height, channel, batch); _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier); @@ -376,16 +559,29 @@ public: if(padding_valid) { - _conv_info = PadStrideInfo(); + _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation); } else { - _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height), DataLayout::NCHW, _dilation); + _conv_info = PadStrideInfo(stride.width, stride.height); } } void configure_target() { +#if defined(ARM_COMPUTE_OPENCL_ENABLED) + if(_export_to_cl_image) + { + _validate_output &= image2d_from_buffer_supported(CLKernelLibrary::get().get_device()); + _validate_output &= (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) != 0); + } +#endif 
// ARM_COMPUTE_OPENCL_ENABLED + + if(!_validate_output) + { + return; + } + TensorShape input_shape = _input_shape; TensorShape weights_shape = _weights_shape; @@ -396,50 +592,89 @@ public: } // Create tensors - _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout); - _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout); - _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout); - _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout); + _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout); + _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout); + _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout); + TensorType *target_to_use = nullptr; + if(!_in_place) + { + _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout); + target_to_use = &_target; + } + + DWCComputeKernelInfo dwc_info; + dwc_info.n0 = _n0; + dwc_info.m0 = _conv_info.stride().first == 1 && _dilation.x() == 1 ? 8 : 1; + dwc_info.export_input_to_cl_image = false; + dwc_info.export_weights_to_cl_image = _export_to_cl_image; - DWCWeightsKernelInfo dwc_weights_info; - dwc_weights_info.n0 = _n0; + const ConvolutionInfo conv_kernel_info + { + _conv_info, _depth_multiplier, _act_info, _dilation + }; - DWCKernelInfo dwc_info; - dwc_info.activation_info = _act_info; + add_padding_x({ &_src, &_biases, &_target }, _data_layout); + add_padding_x({ &_weights }, _data_layout, _export_to_cl_image); // Don't add left padding if cl image will be used // Create Depthwise Convolution configure function - _dwc.configure(&_src, &_weights, &_biases, &_target, dwc_weights_info, dwc_info, _conv_info, _depth_multiplier, _dilation); + _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_info, conv_kernel_info); - ARM_COMPUTE_EXPECT(_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_target.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(_src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_biases.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_target.info()->is_resizable()); } void allocate_and_run_target() { + if(!_validate_output) + { + return; + } + // Allocate tensors _src.allocator()->allocate(); _weights.allocator()->allocate(); _biases.allocator()->allocate(); - _target.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!_src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_biases.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_target.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!_src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable()); + if(!_in_place) + { + _target.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!_target.info()->is_resizable()); + } // Fill tensors fill(AccessorType(_src), 0); fill(AccessorType(_weights), 1); fill(AccessorType(_biases), 2); + // Test 
Multi DataLayout graph cases, when the data layout changes after configure + _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + if(!_in_place) + { + _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + } + // Compute function _dwc.run(); + + // Reinstating original data layout for the test suite to properly check the values + if(!_in_place) + { + _target.info()->set_data_layout(_data_layout); + } } void compute_reference() { + if(!_validate_output) + { + return; + } + SimpleTensor<T> src{ _input_shape, _data_type }; SimpleTensor<T> weights{ _weights_shape, _data_type }; SimpleTensor<T> biases{ _biases_shape, _data_type }; @@ -448,9 +683,9 @@ public: fill(weights, 1); fill(biases, 2); - const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), _conv_info, - _depth_multiplier, _dilation); - _reference = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info); + const ConvolutionInfo info{ _conv_info, _depth_multiplier, _act_info, _dilation }; + const TensorShape dst_shape = compute_depthwise_convolution_shape(TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info); + _reference = reference::activation_layer(reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation), _act_info); } protected: @@ -494,27 +729,28 @@ protected: Size2D _dilation{}; unsigned int _depth_multiplier{}; unsigned int _n0{}; + bool _export_to_cl_image{}; + bool _validate_output{ true }; + bool _in_place{ false }; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false, bool in_place = false> class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T> { public: - template <typename...> void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info) { DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type, input_quantization_info, input_quantization_info, output_quantization_info, - data_layout, act_info); + data_layout, act_info, mixed_layout, in_place); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW, bool in_place = false> class DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW> { public: - template <typename...> void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type, QuantizationInfo input_quantization_info, QuantizationInfo 
output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info) { @@ -532,10 +768,10 @@ public: DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, input_data_type, weights_data_type, input_quantization_info, QuantizationInfo(weights_scales), output_quantization_info, - data_layout, act_info); + data_layout, act_info, false, in_place); } }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_DEPTHWISE_CONVOLUTION_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_DEPTHWISECONVOLUTIONLAYERFIXTURE_H diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h index 1c1f46a64c..4eb25a5bc5 100644 --- a/tests/validation/fixtures/DequantizationLayerFixture.h +++ b/tests/validation/fixtures/DequantizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DequantizationValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype, DataLayout data_layout) { _quantization_info = generate_quantization_info(src_data_type, shape.z()); @@ -77,15 +76,15 @@ protected: FunctionType dequantization_layer; dequantization_layer.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/DirectConvolution3DFixture.h b/tests/validation/fixtures/DirectConvolution3DFixture.h new file mode 100644 index 0000000000..e80ad2f54f --- /dev/null +++ b/tests/validation/fixtures/DirectConvolution3DFixture.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2021, 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H + +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/ActivationLayer.h" +#include "tests/validation/reference/Conv3D.h" + +#include <random> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +using namespace arm_compute::misc::shape_calculator; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DirectConvolution3DValidationGenericFixture : public framework::Fixture +{ +public: + using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type; + + void setup(const TensorShape &input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth, + unsigned int num_kernels, bool has_bias, const ActivationLayerInfo &act_info, const DataType &data_type, const DataLayout &data_layout, + const QuantizationInfo &src_qinfo = QuantizationInfo(), const QuantizationInfo &weights_qinfo = QuantizationInfo(), const QuantizationInfo &dst_qinfo = QuantizationInfo()) + { + ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NDHWC); + + const TensorShape weights_shape(num_kernels, input_shape[0], kernel_width, kernel_height, kernel_depth); + const TensorShape bias_shape(num_kernels); + const DataType bias_data_type = is_data_type_quantized(data_type) ? 
DataType::S32 : data_type; + const Conv3dInfo conv3d_info(Size3D(stride_x, stride_y, stride_z), Padding3D(pad_x, pad_y, pad_z), act_info, Size3D(1U, 1U, 1U), DimensionRoundingType::FLOOR, false); + const TensorShape output_shape = compute_conv3d_shape(input_shape, weights_shape, conv3d_info); + + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, conv3d_info, has_bias, data_type, bias_data_type, data_layout, src_qinfo, weights_qinfo, dst_qinfo); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, conv3d_info, has_bias, data_type, bias_data_type, src_qinfo, weights_qinfo, dst_qinfo); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + + TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const Conv3dInfo &conv3d_info, + bool has_bias, const DataType &data_type, const DataType &bias_data_type, const DataLayout &data_layout, const QuantizationInfo &src_qinfo, + const QuantizationInfo &weights_qinfo, const QuantizationInfo &dst_qinfo) + { + // Create tensors + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, src_qinfo, data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_qinfo, data_layout); + TensorType bias = has_bias ? create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo()) : TensorType(); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, dst_qinfo, data_layout); + + // Create and configure function + FunctionType conv{}; + conv.configure(&src, &weights, has_bias ? 
&bias : nullptr, &dst, conv3d_info); + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + src.allocator()->allocate(); + weights.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(src), 0); + fill(AccessorType(weights), 1); + + if(has_bias) + { + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + bias.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + fill(AccessorType(bias), 2); + } + + // Compute Direct Convolution 3D function + conv.run(); + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, + const Conv3dInfo &conv3d_info, bool has_bias, const DataType &data_type, const DataType &bias_data_type, const QuantizationInfo &src_qinfo, + const QuantizationInfo &weights_qinfo, const QuantizationInfo &dst_qinfo) + { + // Create reference + SimpleTensor<T> src{ input_shape, data_type, 1, src_qinfo }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_qinfo }; + SimpleTensor<TBias> bias{ bias_shape, bias_data_type }; + SimpleTensor<T> dst{ output_shape, data_type, 1, dst_qinfo }; + + // Fill reference + fill(src, 0); + fill(weights, 1); + + if(has_bias) + { + fill(bias, 2); + } + + return reference::activation_layer(reference::conv3d<T, TBias>(src, weights, bias, dst, conv3d_info), conv3d_info.act_info); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DirectConvolution3DValidationFixture : public DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth, + unsigned int num_kernels, bool has_bias, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout) + { + DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, stride_z, pad_x, pad_y, pad_z, kernel_width, kernel_height, + kernel_depth, num_kernels, has_bias, act_info, data_type, data_layout); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DirectConvolution3DValidationQuantizedFixture : public DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, int stride_x, int stride_y, int stride_z, int pad_x, int pad_y, int pad_z, unsigned int kernel_width, int kernel_height, int kernel_depth, + unsigned int num_kernels, bool has_bias, ActivationLayerInfo act_info, DataType data_type, DataLayout data_layout, QuantizationInfo src_qinfo, QuantizationInfo weights_qinfo, + QuantizationInfo dst_qinfo) + { + DirectConvolution3DValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, stride_z, pad_x, pad_y, pad_z, kernel_width, kernel_height, + kernel_depth, num_kernels, has_bias, act_info, data_type, data_layout, src_qinfo, + weights_qinfo, dst_qinfo); + } 
+}; +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTION3DFIXTURE_H diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h index 8e4de77535..6f204642ca 100644 --- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,6 +21,10 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H + #include "arm_compute/core/Helpers.h" #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -51,12 +55,54 @@ class DirectConvolutionValidationGenericFixture : public framework::Fixture public: using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type; - template <typename...> + void setup_quantization(const TensorShape &input_shape, const TensorShape &weights_shape, QuantizationInfo &input_q_info, + QuantizationInfo &weights_q_info, DataType data_type) + { + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + _hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + input_q_info = QuantizationInfo(scale_lhs, offset_lhs); + weights_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(input_q_info, weights_q_info, + weights_shape.y() /* height */, weights_shape.x() /* width */, input_shape.z() /* channels */, + data_type, 0.5f /* bias_fraction */); + + _dst_q_info = q_hint.q_info; + _min_bias = q_hint.bias_min; + _max_bias = q_hint.bias_max; + + // Do not change these limits: they are the natural limits of the associated data types and + // are embedded in the computation of the dst quantization info. + _min_u8 = 0; + _max_u8 = 255; + _min_s8 = -128; + _max_s8 = 127; + } + void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, - DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false) { - _quantization_info = quantization_info; + // This hash is used by the random generators. There may be hash collisions, but + // this is intentional: it is a very easy way to make the current + // random generation process produce different values for the many test configurations + // that previously used the same set of values.
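+ // (The hash is consumed in two places: setup_quantization() seeds its std::mt19937 with + // library->seed() + _hash, and the fill() calls in compute_target() and compute_reference() + // offset their seeds by _hash, so each configuration draws distinct but reproducible data.)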
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] + + stride_x + stride_y + pad_x + pad_y + kernel_size + num_kernels + mixed_layout + + (data_layout == DataLayout::NHWC); + _data_type = data_type; + _mixed_layout = mixed_layout; TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); @@ -68,27 +114,66 @@ public: const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info); - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); + QuantizationInfo input_q_info = quantization_info; + QuantizationInfo weights_q_info = quantization_info; + _dst_q_info = quantization_info; + + if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY)) + { + setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type); + } + + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info); } - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) { ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN); ARM_COMPUTE_UNUSED(dilation); - _quantization_info = quantization_info; + // This hash is used by the random generators. There may be hash collisions, but + // this is intentional: it is a very easy way to make the current + // random generation process produce different values for the many test configurations + // that previously used the same set of values. + _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] + + weights_shape[0] + weights_shape[1] + weights_shape[2] + weights_shape[3] + dilation.x() + + dilation.y() + info.pad_bottom() + info.pad_left() + info.pad_right() + info.pad_top(); + _data_type = data_type; const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ?
DataType::S32 : data_type; - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); + QuantizationInfo input_q_info = quantization_info; + QuantizationInfo weights_q_info = quantization_info; + _dst_q_info = quantization_info; + + if(is_data_type_quantized(data_type) && (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY)) + { + setup_quantization(input_shape, weights_shape, input_q_info, weights_q_info, data_type); + } + + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, input_q_info, weights_q_info, act_info); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + DataLayout data_layout = src.info()->data_layout(); + // Test multi-DataLayout graph cases, where the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstate the original data layout so the test suite can properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template <typename U> void fill(U &&tensor, int i) { @@ -96,14 +181,14 @@ protected: { case DataType::QASYMM8: { - std::uniform_int_distribution<uint8_t> distribution(0, 50); + std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8); library->fill(tensor, distribution, i); break; } case DataType::QASYMM8_SIGNED: { // Use small input range to avoid all the test results being saturated at the end.
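+ // A 32-bit distribution type is used here (and for QASYMM8 above) because + // std::uniform_int_distribution is not defined by the C++ standard for 8-bit integer types; + // the limits (_min_s8/_max_s8 and _min_u8/_max_u8) keep the generated values in the 8-bit range.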
- std::uniform_int_distribution<int8_t> distribution(-25, 25); + std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8); library->fill(tensor, distribution, i); break; } @@ -121,7 +206,7 @@ protected: } case DataType::S32: { - std::uniform_int_distribution<int32_t> distribution(-5, 5); + std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias); library->fill(tensor, distribution, i); break; } @@ -131,7 +216,7 @@ protected: } TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout) + DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info, const DataLayout &data_layout) { if(data_layout == DataLayout::NHWC) { @@ -141,19 +226,22 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout); - TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout); + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_q_info, data_layout); + TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, weights_q_info, data_layout); + TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, QuantizationInfo()); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, _dst_q_info, data_layout); + + add_padding_x({ &src, &bias, &dst }, data_layout); + add_padding_x({ &weights }, data_layout, input_shape[0] % 4 == 0); // Don't add left padding if cl image will be used // Create and configure function FunctionType conv; conv.configure(&src, &weights, &bias, &dst, info, act_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -161,67 +249,86 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors - fill(AccessorType(src), 0); - fill(AccessorType(weights), 1); - fill(AccessorType(bias), 2); + fill(AccessorType(src), 0 + _hash); + fill(AccessorType(weights), 1 + _hash); + 
fill(AccessorType(bias), 2 + _hash); - // Compute NEConvolutionLayer function - conv.run(); + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info) + DataType data_type, DataType bias_data_type, QuantizationInfo input_q_info, QuantizationInfo weights_q_info, ActivationLayerInfo act_info) { // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info }; + SimpleTensor<T> src{ input_shape, data_type, 1, input_q_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, weights_q_info }; + SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, QuantizationInfo() }; // Fill reference - fill(src, 0); - fill(weights, 1); - fill(bias, 2); + fill(src, 0 + _hash); + fill(weights, 1 + _hash); + fill(bias, 2 + _hash); - SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info); - return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst; + SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info, + Size2D(1U, 1U) /* dilation */, 1 /* num_groups */, _dst_q_info); + SimpleTensor<T> dst2 = (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst; + return dst2; } TensorType _target{}; SimpleTensor<T> _reference{}; - QuantizationInfo _quantization_info{}; + QuantizationInfo _dst_q_info{}; DataType _data_type{}; + bool _mixed_layout{ false }; + int32_t _hash{0}; + + // Random initialization limits + // Default values are the previously handcrafted limits + // that should be used when we don't use dynamic quantization + int32_t _min_bias{-5}; + int32_t _max_bias{5}; + int32_t _min_u8{0}; + int32_t _max_u8{50}; + int32_t _min_s8{-25}; + int32_t _max_s8{25}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info, DataLayout data_layout) { DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), - act_info, data_layout); + act_info, data_layout, mixed_layout); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, int stride_x, int
stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) { DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info, - act_info, data_layout); + act_info, data_layout, mixed_layout); } }; @@ -229,7 +336,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) { @@ -242,7 +348,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, ActivationLayerInfo act_info) { @@ -254,3 +359,5 @@ public: } // namespace validation } // namespace test } // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DIRECTCONVOLUTIONLAYERFIXTURE_H diff --git a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h b/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h deleted file mode 100644 index 6ef30d3c21..0000000000 --- a/tests/validation/fixtures/DirectConvolutionLayerTensorShiftFixture.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2017-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/Helpers.h" -#include "tests/validation/fixtures/ConvolutionLayerFixture.h" -#include "tests/validation/reference/ConvolutionLayer.h" - -#include <random> - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationGenericTensorShiftFixture : public framework::Fixture -{ -public: - using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type; - -public: - template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, - DataType data_type, QuantizationInfo quantization_info) - { - _quantization_info = quantization_info; - _data_type = data_type; - - const TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels); - const TensorShape bias_shape(num_kernels); - const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR); - const TensorShape output_shape = get_output_shape(input_shape, weights_shape, info); - const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); - } - - template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, - DataType data_type, QuantizationInfo quantization_info) - { - ARM_COMPUTE_UNUSED(dilation_x, dilation_y); - - _quantization_info = quantization_info; - _data_type = data_type; - - const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; - - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info); - } - -protected: - template <typename U> - void fill(U &&tensor, int i) - { - switch(tensor.data_type()) - { - case DataType::QASYMM8: - { - std::uniform_int_distribution<uint8_t> distribution(0, 50); - library->fill(tensor, distribution, i); - break; - } - case DataType::F16: - { - arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; - library->fill(tensor, distribution, i); - break; - } - case DataType::F32: - { - std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); - library->fill(tensor, distribution, i); - break; - } - case DataType::S32: - { - std::uniform_int_distribution<int32_t> distribution(-5, 5); - library->fill(tensor, distribution, i); - break; - } - default: - library->fill_tensor_uniform(tensor, i); - } - } - - TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info) - { - // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info); - TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info); - - TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info); - TensorType dst1 = create_tensor<TensorType>(output_shape1, data_type, 1, quantization_info); - - // Create and configure function - FunctionType conv; - conv.configure(&src, &weights, &bias, &dst, info); - FunctionType conv1; - conv1.configure(&dst, &weights, &bias, &dst1, info); - - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst1.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - src.allocator()->allocate(); - weights.allocator()->allocate(); - bias.allocator()->allocate(); - dst.allocator()->allocate(); - dst1.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst1.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - fill(AccessorType(src), 0); - fill(AccessorType(weights), 1); - fill(AccessorType(bias), 2); - - // Compute NEConvolutionLayer function - GCScheduler::get().memory_barrier(); - conv.run(); - GCScheduler::get().memory_barrier(); - conv1.run(); - - return dst1; - } - - SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const 
TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info) - { - // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info }; - SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info }; - - SimpleTensor<T> dst{ output_shape, data_type, 1, quantization_info }; - TensorShape output_shape1 = get_output_shape(output_shape, weights_shape, info); - - // Fill reference - fill(src, 0); - fill(weights, 1); - fill(bias, 2); - - dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info); - return reference::convolution_layer<T>(dst, weights, bias, output_shape1, info); - } - - TensorType _target{}; - SimpleTensor<T> _reference{}; - QuantizationInfo _quantization_info{}; - DataType _data_type{}; - -private: - TensorShape get_output_shape(TensorShape in_shape, TensorShape kernel_shape, const PadStrideInfo &info) - { - TensorShape out_shape(in_shape); - const std::pair<unsigned int, unsigned int> scaled_dims = scaled_dimensions(in_shape.x(), - in_shape.y(), - kernel_shape.x(), - kernel_shape.y(), - info); - out_shape.set(0, scaled_dims.first); - out_shape.set(1, scaled_dims.second); - out_shape.set(2, kernel_shape[3]); - return out_shape; - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type) - { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, - QuantizationInfo()); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info) - { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, - quantization_info); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationWithTensorShapesQuantizedTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, - DataType data_type, QuantizationInfo quantization_info) - { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, - quantization_info); - } -}; - 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class DirectConvolutionValidationWithTensorShapesTensorShiftFixture : public DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, unsigned int dilation_x, unsigned int dilation_y, - DataType data_type) - { - DirectConvolutionValidationGenericTensorShiftFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation_x, dilation_y, data_type, - QuantizationInfo()); - } -}; - -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/fixtures/DropoutLayerFixture.h b/tests/validation/fixtures/DropoutLayerFixture.h index 63df936032..a84f2a6407 100644 --- a/tests/validation/fixtures/DropoutLayerFixture.h +++ b/tests/validation/fixtures/DropoutLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class DropoutLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, float ratio, bool forward, DataType data_type) { _target = compute_target(shape, ratio, forward, data_type); @@ -70,17 +69,17 @@ protected: FunctionType dropout_layer; dropout_layer.configure(&src, &mask, &dst, ratio, forward); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); mask.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!mask.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!mask.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/ElementWiseUnaryFixture.h b/tests/validation/fixtures/ElementWiseUnaryFixture.h deleted file mode 100644 index 8cffef48f6..0000000000 --- a/tests/validation/fixtures/ElementWiseUnaryFixture.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (c) 2018-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE -#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/reference/ElementWiseUnary.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ElementWiseUnaryValidationFixture : public framework::Fixture -{ -public: - template <typename...> - void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op) - { - _op = op; - _target = compute_target(input_shape, input_data_type, in_place); - _reference = compute_reference(input_shape, input_data_type); - } - -protected: - template <typename U> - void fill(U &&tensor, int i, DataType data_type) - { - using FloatType = typename std::conditional < std::is_same<T, half>::value || std::is_floating_point<T>::value, T, float >::type; - using FloatDistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<FloatType>>::type; - - switch(_op) - { - case ElementWiseUnary::EXP: - { - FloatDistributionType distribution{ FloatType(-1.0f), FloatType(1.0f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::RSQRT: - { - FloatDistributionType distribution{ FloatType(1.0f), FloatType(2.0f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::ABS: - case ElementWiseUnary::NEG: - { - switch(data_type) - { - case DataType::F16: - { - arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -2.0f, 2.0f }; - library->fill(tensor, distribution, i); - break; - } - case DataType::F32: - { - FloatDistributionType distribution{ FloatType(-2.0f), FloatType(2.0f) }; - library->fill(tensor, distribution, i); - break; - } - case DataType::S32: - { - std::uniform_int_distribution<int32_t> distribution(-100, 100); - library->fill(tensor, distribution, i); - break; - } - default: - ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented"); - } - break; - } - case ElementWiseUnary::LOG: - { - FloatDistributionType distribution{ FloatType(0.0000001f), FloatType(100.0f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::SIN: - { - FloatDistributionType distribution{ FloatType(-100.00f), FloatType(100.00f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::ROUND: - { - FloatDistributionType distribution{ FloatType(100.0f), FloatType(-100.0f) }; - library->fill(tensor, distribution, i); - break; - } - default: - ARM_COMPUTE_ERROR("Not implemented"); - } - } - - TensorType compute_target(const TensorShape &shape, DataType data_type, bool in_place) - { - // Create tensors - 
TensorType src = create_tensor<TensorType>(shape, data_type); - TensorType dst = create_tensor<TensorType>(shape, data_type); - - TensorType *actual_dst = in_place ? &src : &dst; - - // Create and configure function - FunctionType elwiseunary_layer; - elwiseunary_layer.configure(&src, actual_dst); - - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - src.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - if(!in_place) - { - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - } - - // Fill tensors - fill(AccessorType(src), 0, data_type); - - // Compute function - elwiseunary_layer.run(); - - if(in_place) - { - return src; - } - else - { - return dst; - } - } - - SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type) - { - // Create reference - SimpleTensor<T> src{ shape, data_type }; - - // Fill reference - fill(src, 0, data_type); - - return reference::elementwise_unary<T>(src, _op); - } - - TensorType _target{}; - SimpleTensor<T> _reference{}; - ElementWiseUnary _op{}; -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class RsqrtValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ExpValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type, bool in_place) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG); - } -}; - -template <typename TensorType, typename 
AccessorType, typename FunctionType, typename T> -class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN); - } -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type) - { - ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND); - } -}; -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */ diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h index dcb408c801..f36a1f75b7 100644 --- a/tests/validation/fixtures/ElementwiseOperationsFixture.h +++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,11 +21,12 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_OPERATIONS_FIXTURE -#define ARM_COMPUTE_TEST_ELEMENTWISE_OPERATIONS_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" +#include "arm_compute/core/Validate.h" #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" @@ -45,12 +46,14 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticOperationsGenericFixture : public framework::Fixture { public: - template <typename...> void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace = false, bool use_dynamic_shape = false) { - _op = op; + _op = op; + _use_dynamic_shape = use_dynamic_shape; + _is_inplace = is_inplace; + _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out); _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, qinfo0, qinfo1, qinfo_out); } @@ -83,26 +86,67 @@ protected: QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) { // Create tensors - TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0); - TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1); - TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out); + const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1); + TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0); + TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1); + TensorType dst = create_tensor<TensorType>(out_shape, output_data_type, 1, qinfo_out); + + // Check whether to do in-place computation and whether the inputs are broadcast compatible + TensorType *actual_dst = &dst; + if(_is_inplace) + { + bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (data_type0 == output_data_type); + bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (data_type1 == output_data_type); + bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace); + ARM_COMPUTE_ASSERT(do_in_place); + + if(src1_is_inplace) + { + actual_dst = &ref_src1; + } + else + { + actual_dst = &ref_src2; + } + } +
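+ // The assert above is intentional: when _is_inplace is set, the fixture does not silently + // fall back to an out-of-place run; the test configuration must provide at least one input + // whose shape, data type and quantization info exactly match the output. +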
+ // If _use_dynamic_shape is true, this fixture tests the scenario for dynamic shapes: + // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic() + // - After configure, tensors are marked as static for run using set_tensor_static() + // - The tensors with static shape are given to run() + if(_use_dynamic_shape) + { + set_tensor_dynamic(ref_src1); + set_tensor_dynamic(ref_src2); + } // Create and configure function FunctionType elem_op; - elem_op.configure(&ref_src1, &ref_src2, &dst); + elem_op.configure(&ref_src1, &ref_src2, actual_dst); - ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + if(_use_dynamic_shape) + { + set_tensor_static(ref_src1); + set_tensor_static(ref_src2); + } + + ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable()); // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); - dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + // If not computing in place, we still need to allocate the original dst + if(!_is_inplace) + { + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + dst.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + } + + ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable()); // Fill tensors fill(AccessorType(ref_src1), 0); @@ -111,7 +155,7 @@ protected: // Compute function elem_op.run(); - return dst; + return std::move(*actual_dst); } SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, @@ -133,6 +177,8 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; ArithmeticOperation _op{ ArithmeticOperation::ADD }; + bool _use_dynamic_shape{ false }; + bool _is_inplace{ false }; }; // Arithmetic operation fused with activation function @@ -140,15 +186,15 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticOperationsFuseActivationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace = true) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(op, shape0, shape1, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); - _act_info = act_info; + qinfo0, qinfo1, qinfo_out, is_inplace); + _act_info = act_info; + _is_inplace = is_inplace; } protected: @@ -156,26 +202,51 @@ protected: QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) { // Create tensors - TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0); - TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1); - TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1),
output_data_type, 1, qinfo_out); + const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1); + TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0); + TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1); + TensorType dst = create_tensor<TensorType>(out_shape, output_data_type, 1, qinfo_out); + + // Check whether to do in-place computation and whether the inputs are broadcast compatible + TensorType *actual_dst = &dst; + if(_is_inplace) + { + bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (data_type0 == output_data_type); + bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (data_type1 == output_data_type); + bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace); + ARM_COMPUTE_ASSERT(do_in_place); + + if(src1_is_inplace) + { + actual_dst = &ref_src1; + } + else + { + actual_dst = &ref_src2; + } + } // Create and configure function FunctionType elem_op; - elem_op.configure(&ref_src1, &ref_src2, &dst, _act_info); + elem_op.configure(&ref_src1, &ref_src2, actual_dst, _act_info); - ARM_COMPUTE_EXPECT(ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable()); // Allocate tensors ref_src1.allocator()->allocate(); ref_src2.allocator()->allocate(); - dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!ref_src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!ref_src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + // If not computing in place, we still need to allocate the original dst + if(!_is_inplace) + { + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + dst.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + } + + ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable()); // Fill tensors fill(AccessorType(ref_src1), 0); @@ -184,7 +255,7 @@ protected: // Compute function elem_op.run(); - return dst; + return std::move(*actual_dst); } SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, @@ -197,18 +268,18 @@ protected: } ActivationLayerInfo _act_info{}; + bool _is_inplace{ false }; }; template <typename TensorType, typename AccessorType, typename FunctionType, typename T> class ArithmeticDivisionBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -216,12 +287,35 @@ template
<typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ArithmeticDivisionBroadcastDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) + { + ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1, + data_type0, data_type1, output_data_type, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace, true /* use_dynamic_shape */); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ArithmeticDivisionDynamicShapeValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) + { + ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape, + data_type0, data_type1, output_data_type, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace, true /* use_dynamic_shape */); } }; @@ -229,12 +323,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -242,12 +335,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const 
TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ArithmeticDivisionValidationIntegerFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) + { + ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape, + data_type0, data_type1, output_data_type, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -255,14 +359,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ArithmeticDivisionValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::DIV, shape, shape, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -270,12 +373,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -283,12 +385,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), 
QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -296,12 +397,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -309,12 +409,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -322,14 +421,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape, shape, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -337,14 +435,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMaxQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MAX, shape0, shape1, data_type0, data_type1, output_data_type, - 
qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -352,12 +449,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -365,12 +461,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -378,12 +473,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -391,12 +485,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + 
QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -404,14 +497,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape, shape, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -419,14 +511,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseMinQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::MIN, shape0, shape1, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -434,12 +525,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseSquaredDiffBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -447,12 +537,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseSquaredDiffValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -460,12 +549,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class 
ElementwiseSquaredDiffBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -473,12 +561,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseSquaredDiffValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -486,14 +573,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseSquaredDiffValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape, shape, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -501,14 +587,13 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwiseSquaredDiffQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, - QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) + QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::SQUARED_DIFF, shape0, shape1, data_type0, data_type1, output_data_type, - qinfo0, qinfo1, qinfo_out); + qinfo0, qinfo1, qinfo_out, is_inplace); } }; @@ -516,7 +601,6 @@ template <typename TensorType, typename AccessorType, 
typename FunctionType, typ class PReluLayerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape0, shape1, @@ -529,7 +613,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PReluLayerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::PRELU, shape, shape, @@ -542,7 +625,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PReluLayerValidationQuantizedFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) @@ -557,7 +639,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PReluLayerQuantizedBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out) @@ -572,12 +653,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwisePowerBroadcastValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), is_inplace); } }; @@ -585,12 +665,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwisePowerValidationFixture : public ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace) { ArithmeticOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo()); + QuantizationInfo(), QuantizationInfo(), 
QuantizationInfo(), is_inplace); } }; @@ -598,12 +677,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwisePowerBroadcastValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape0, shape1, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; @@ -611,16 +689,15 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ElementwisePowerValidationFloatFixture : public ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info) + void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ActivationLayerInfo act_info, bool is_inplace) { ArithmeticOperationsFuseActivationFixture<TensorType, AccessorType, FunctionType, T>::setup(ArithmeticOperation::POWER, shape, shape, data_type0, data_type1, output_data_type, - QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); } }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_ARITHMETIC_OPERATIONS_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEOPERATIONSFIXTURE_H diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h new file mode 100644 index 0000000000..15344288db --- /dev/null +++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2018-2021, 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE +#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE + +#include "arm_compute/core/QuantizationInfo.h" +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/ElementwiseUnary.h" + +#include <tuple> +#include <limits> +#include <type_traits> +#include <vector> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ElementWiseUnaryValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op, + bool use_dynamic_shape = false, QuantizationInfo qinfo = QuantizationInfo(), QuantizationInfo qinfo_out = QuantizationInfo()) + { + _op = op; + _use_dynamic_shape = use_dynamic_shape; + _target = compute_target(input_shape, input_data_type, in_place, qinfo, qinfo_out); + _reference = compute_reference(input_shape, input_data_type, qinfo, qinfo_out); + } + +protected: + template <typename U> + void fill(U &&tensor, int i, DataType data_type) + { + using FloatType = typename std::conditional < std::is_same<T, half>::value || std::is_floating_point<T>::value, T, float >::type; + using FloatDistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<FloatType>>::type; + + switch(_op) + { + case ElementWiseUnary::EXP: + { + switch(data_type) + { + case DataType::F32: + { + FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + + break; + } + case ElementWiseUnary::RSQRT: + case ElementWiseUnary::LOG: + { + // For floating-point data types, the chosen input range is all positive numbers + // (i.e. positive and negative zeros are excluded).
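+ // This is because RSQRT and LOG are mathematically undefined for zero and negative inputs; for F32 the fill below therefore draws from [std::numeric_limits<float>::min(), std::numeric_limits<float>::max()], i.e. from the smallest positive normal value up to the largest finite value.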
+ switch(data_type) + { + case DataType::F32: + { + FloatDistributionType distribution{ std::numeric_limits<float>::min(), std::numeric_limits<float>::max() }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + + break; + } + case ElementWiseUnary::SIN: + { + switch(data_type) + { + case DataType::F32: + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::S32: + { + std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()); + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + + break; + } + case ElementWiseUnary::ABS: + case ElementWiseUnary::NEG: + case ElementWiseUnary::ROUND: + { + switch(data_type) + { + case DataType::F32: + { + FloatDistributionType distribution{ std::numeric_limits<float>::lowest() / 2, std::numeric_limits<float>::max() / 2 }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::S32: + { + std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()); + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + + break; + } + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + } + + TensorType compute_target(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo qinfo, QuantizationInfo qinfo_out) + { + // Create tensors + TensorType src = create_tensor<TensorType>(shape, data_type, 1, qinfo); + TensorType dst = create_tensor<TensorType>(shape, data_type, 1, qinfo_out); + TensorType *actual_dst = in_place ? &src : &dst; + + // If _use_dynamic_shape is true, this fixture tests the dynamic-shape scenario:
+ // - At configure time, all input tensors are marked as dynamic using set_tensor_dynamic() + // - After configure, tensors are marked as static for run using set_tensor_static() + // - The tensors with static shape are given to run() + if(_use_dynamic_shape) + { + set_tensor_dynamic(src); + } + + // Create and configure function + FunctionType elwiseunary_layer; + elwiseunary_layer.configure(&src, actual_dst); + + if(_use_dynamic_shape) + { + set_tensor_static(src); + } + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + src.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + if(!in_place) + { + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + dst.allocator()->allocate(); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + } + + // Fill tensors + fill(AccessorType(src), 0, data_type); + + // Compute function + elwiseunary_layer.run(); + + if(in_place) + { + return src; + } + else + { + return dst; + } + } + + SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo, QuantizationInfo qinfo_out) + { + // Create reference + SimpleTensor<T> src{ shape, data_type, 1, qinfo }; + SimpleTensor<T> dst{ shape, data_type, 1, qinfo_out }; + + // Fill reference + fill(src, 0, data_type); + + return reference::elementwise_unary<T>(src, dst, _op); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + ElementWiseUnary _op{}; + bool _use_dynamic_shape{ false }; + QuantizationInfo _input_qinfo{}; + QuantizationInfo _output_qinfo{}; +}; +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo, QuantizationInfo qinfo_out) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT, false, qinfo, qinfo_out); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class RsqrtValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class RsqrtDynamicShapeValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::RSQRT, true); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ExpValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ExpQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> 
+{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, bool in_place) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS); + } +}; + +template <typename TensorType, typename AccessorType, 
typename FunctionType, typename T> +class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq); + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */ diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index 86a97272a0..024227b22a 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FFTValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -88,15 +87,17 @@ protected: FunctionType fft; fft.configure(&src, &dst, InfoType()); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &dst }); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -132,18 +133,32 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FFTConvolutionValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) + DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false) { - _data_type = data_type; - _data_layout = data_layout; + _mixed_layout = mixed_layout; + _data_type = data_type; + _data_layout = data_layout; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + template <typename U> void fill(U &&tensor, int i) { @@ -185,14 +200,16 @@ protected: TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout); TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout); + add_padding_x({ &src, &weights, &bias, &dst }, _data_layout); + // Create and configure function FunctionType conv; conv.configure(&src, &weights, &bias, &dst, info, act_info, _data_type == DataType::F16); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -200,19 +217,25 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - // Compute convolution function - conv.run(); - + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } @@ -239,18 +262,18 @@ protected: SimpleTensor<T> _reference{}; DataType _data_type{}; DataLayout _data_layout{}; + bool _mixed_layout{ false }; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, - data_type, data_layout, act_info); + data_type, data_layout, act_info, mixed_layout); } }; } // namespace validation diff --git a/tests/validation/fixtures/FillFixture.h b/tests/validation/fixtures/FillFixture.h index 706c13565d..0239a68903 100644 --- a/tests/validation/fixtures/FillFixture.h +++ b/tests/validation/fixtures/FillFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2019 Arm Limited. + * Copyright (c) 2019, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -42,7 +42,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FillFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, DataType data_type) { _target = compute_target(input_shape, data_type); diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h index 67c4d2a2b1..e72487c7cf 100644 --- a/tests/validation/fixtures/FlattenLayerFixture.h +++ b/tests/validation/fixtures/FlattenLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -50,7 +50,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FlattenLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { TensorShape shape_flatten; @@ -83,15 +82,15 @@ protected: FunctionType flatten_layer; flatten_layer.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/FloorFixture.h b/tests/validation/fixtures/FloorFixture.h index 9388486983..7d38666f47 100644 --- a/tests/validation/fixtures/FloorFixture.h +++ b/tests/validation/fixtures/FloorFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class FloorValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -68,15 +67,15 @@ protected: FunctionType floor_func; floor_func.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h index 3760cfb8b7..344187868f 100644 --- a/tests/validation/fixtures/FullyConnectedLayerFixture.h +++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. 
+ * Copyright (c) 2017-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -34,6 +34,7 @@ #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" #include "tests/validation/Helpers.h" +#include "tests/validation/Validation.h" #include "tests/validation/reference/ActivationLayer.h" #include "tests/validation/reference/FullyConnectedLayer.h" #include "tests/validation/reference/Utils.h" @@ -54,16 +55,63 @@ public: using TBias = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type; public: - template <typename...> + void setup_quantization(TensorShape weights_shape, TensorShape output_shape, QuantizationInfo &input_q_info, QuantizationInfo &weights_q_info, DataType data_type) + { + _hash = weights_shape[0] + weights_shape[1] + output_shape[0] + output_shape[1]; + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + _hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + input_q_info = QuantizationInfo(scale_lhs, offset_lhs); + weights_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + + const int k = weights_shape.x(); + QuantizationHint q_hint = suggest_mac_dst_q_info_and_bias(input_q_info, weights_q_info, k, data_type, 0.1f /* bias_fraction */, 4 /* number of standard deviations*/); + + _dst_q_info = q_hint.q_info; + _min_bias = q_hint.bias_min; + _max_bias = q_hint.bias_max; + + // Do not change here as these limits are the natural limits of the associated data types and + // are embedded in the computation of the dst quantization info. + _min_u8 = 0; + _max_u8 = 255; + _min_s8 = -128; + _max_s8 = 127; + } + void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, - DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false) { ARM_COMPUTE_UNUSED(weights_shape); ARM_COMPUTE_UNUSED(bias_shape); + _mixed_layout = mixed_layout; _data_type = data_type; _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; - _quantization_info = quantization_info; + + // Note: the quantization_info parameter of setup() is only used when the data type is not quantized, or when a fused activation other than the identity is enabled; otherwise suitable quantization info is generated dynamically below.
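+ // In the dynamic case, setup_quantization() above draws power-of-two scales in [2^-5, 2^3] and offsets across the full range of T, then suggest_mac_dst_q_info_and_bias() derives a compatible destination QuantizationInfo together with the [_min_bias, _max_bias] fill range for the bias.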
+ if(is_data_type_quantized(data_type) && (!activation_info.enabled() || activation_info.activation() == ActivationFunction::IDENTITY)) + { + // Initialises quantization info with appropriate scale and offset for given input shapes. + setup_quantization(weights_shape, output_shape, _input_q_info, _weight_q_info, data_type); + } + else + { + _input_q_info = quantization_info; + _weight_q_info = quantization_info; + _dst_q_info = quantization_info; + } + _activation_info = activation_info; _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights); @@ -71,22 +119,37 @@ public: } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute the fully connected function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template <typename U> void fill(U &&tensor, int i) { if(_data_type == DataType::QASYMM8) { - std::uniform_int_distribution<uint8_t> distribution(0, 30); + std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8); library->fill(tensor, distribution, i); } else if(_data_type == DataType::QASYMM8_SIGNED) { - std::uniform_int_distribution<int8_t> distribution(-15, 15); + std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8); library->fill(tensor, distribution, i); } else if(_data_type == DataType::S32) { - std::uniform_int_distribution<int32_t> distribution(-50, 50); + std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias); library->fill(tensor, distribution, i); } else if(_data_type == DataType::F16) @@ -128,10 +191,10 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info); - TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info); - TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info); - TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info); + TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _input_q_info); + TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _weight_q_info); + TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1); + TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _dst_q_info); // Create Fully Connected layer info FullyConnectedLayerInfo fc_info; @@ -143,10 +206,12 @@ protected: FunctionType fc; fc.configure(&src, &weights, &bias, &dst, fc_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); +
ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &weights, &bias, &dst }); // Allocate tensors src.allocator()->allocate(); @@ -154,14 +219,14 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors - fill(AccessorType(src), 0); - fill(AccessorType(bias), 2); + fill(AccessorType(src), 0 + _hash); + fill(AccessorType(bias), 2 + _hash); if(!reshape_weights || !transpose_weights) { @@ -169,7 +234,7 @@ protected: RawTensor tmp(tmp_shape, _data_type, 1); // Fill with original shape - fill(tmp, 1); + fill(tmp, 1 + _hash); // Transpose elementwise tmp = transpose(tmp); @@ -186,11 +251,18 @@ protected: } else { - fill(AccessorType(weights), 1); + fill(AccessorType(weights), 1 + _hash); } - // Compute NEFullyConnectedLayer function - fc.run(); + if(_mixed_layout) + { + mix_layout(fc, src, dst); + } + else + { + // Compute NEFullyConnectedLayer function + fc.run(); + } return dst; } @@ -198,54 +270,384 @@ protected: SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape) { // Create reference - SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info }; - SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info }; - SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info }; + SimpleTensor<T> src{ input_shape, _data_type, 1, _input_q_info }; + SimpleTensor<T> weights{ weights_shape, _data_type, 1, _weight_q_info }; + SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, QuantizationInfo() }; // Fill reference - fill(src, 0); - fill(weights, 1); - fill(bias, 2); + fill(src, 0 + _hash); + fill(weights, 1 + _hash); + fill(bias, 2 + _hash); - return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape), _activation_info, _quantization_info); + return reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, output_shape, _dst_q_info), _activation_info, _dst_q_info); } TensorType _target{}; SimpleTensor<T> _reference{}; DataType _data_type{}; DataType _bias_data_type{}; - QuantizationInfo _quantization_info{}; + bool _mixed_layout{ false }; + QuantizationInfo _input_q_info{}; + QuantizationInfo _weight_q_info{}; + QuantizationInfo _dst_q_info{}; ActivationLayerInfo _activation_info{}; + + // Random initialization limits + // Default values are the previously handcrafted limits + // that should be used when dynamic quantization is not in use + int32_t _min_bias{-50}; + int32_t _max_bias{50}; + + int32_t _min_u8{0}; + int32_t _max_u8{30}; + int32_t _min_s8{-15}; + int32_t _max_s8{15}; + int _hash{0}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class FullyConnectedLayerValidationFixture : public
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, ActivationLayerInfo activation_info) { FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - QuantizationInfo(), activation_info); + QuantizationInfo(), activation_info, mixed_layout); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info) { FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - quantization_info, activation_info); + quantization_info, activation_info, mixed_layout); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class FullyConnectedWithDynamicTensorsFixture : public framework::Fixture +{ +private: + template <typename U> + void fill(U &&tensor, int i) + { + if(_data_type == DataType::F16) + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + } + else if(_data_type == DataType::F32) + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + } + else if(_data_type == DataType::QASYMM8) + { + std::uniform_int_distribution<uint32_t> distribution(_min_u8, _max_u8); + library->fill(tensor, distribution, i); + } + else if(_data_type == DataType::QASYMM8_SIGNED) + { + std::uniform_int_distribution<int32_t> distribution(_min_s8, _max_s8); + library->fill(tensor, distribution, i); + } + else if(_data_type == DataType::S32) + { + std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias); + library->fill(tensor, distribution, i); + } + else + { + library->fill_tensor_uniform(tensor, i); + } + } + + void fill_transposed_weights(TensorType &weights, TensorShape weights_shape, int seed) + { + RawTensor tmp(weights_shape, _data_type, 1); + + // Fill with original shape + fill(tmp, seed); + + // Transpose elementwise + tmp = transpose(tmp); + + AccessorType weights_accessor(weights); + + for(int i = 0; i < tmp.num_elements(); ++i) + { + Coordinates coord = index2coord(tmp.shape(), i); + std::copy_n(static_cast<const RawTensor::value_type *>(tmp(coord)), + tmp.element_size(), + static_cast<RawTensor::value_type *>(weights_accessor(coord))); + } + } + + void validate_with_tolerance(TensorType &target, SimpleTensor<float> &ref) + { + constexpr RelativeTolerance<float> rel_tolerance_f32(0.01f); + constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.001f); + validate(AccessorType(target), ref, rel_tolerance_f32, 0, 
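
The per-type validate_with_tolerance() overloads above encode the acceptance criteria: F32 allows 1% relative or 0.001 absolute error, F16 is looser, and quantized outputs may differ by one quantization step. The combined check reduces to roughly the predicate sketched below (the framework's real validate() additionally supports a permitted fraction of mismatching elements, the tolerance_num argument, plus wrapping ranges and invalid regions):

#include <cmath>
#include <cstdint>
#include <cstdlib>

// An element passes if it is close in absolute *or* relative terms.
bool within_tolerance(float target, float reference, float abs_tol, float rel_tol)
{
    const float diff = std::fabs(target - reference);
    return diff <= abs_tol || diff <= rel_tol * std::fabs(reference);
}

// Quantized outputs: allow a difference of one quantization step.
bool within_tolerance(uint8_t target, uint8_t reference)
{
    return std::abs(int(target) - int(reference)) <= 1;
}
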
abs_tolerance_f32); + } + + void validate_with_tolerance(TensorType &target, SimpleTensor<half_float::half> &ref) + { + constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.3f); + const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); + constexpr float tolerance_num_f16 = 0.07f; + + validate(AccessorType(target), ref, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16); + } + + void validate_with_tolerance(TensorType &target, SimpleTensor<uint8_t> &ref) + { + constexpr AbsoluteTolerance<uint32_t> tolerance_qasymm8(1); + validate(AccessorType(target), ref, tolerance_qasymm8); + } + + void validate_with_tolerance(TensorType &target, SimpleTensor<int8_t> &ref) + { + constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1); + validate(AccessorType(target), ref, tolerance_qasymm8_signed); + } + + void setup_quantization(TensorShape weights_shape, TensorShape output_shape, QuantizationInfo &input_q_info, QuantizationInfo &weights_q_info, DataType data_type) + { + _hash = weights_shape[0] + weights_shape[1] + output_shape[0] + output_shape[1]; + + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + _hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + input_q_info = QuantizationInfo(scale_lhs, offset_lhs); + weights_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + const int k = weights_shape.x(); + QuantizationHint q_hint = suggest_mac_dst_q_info_and_bias(input_q_info, weights_q_info, k, data_type, 0.1f /* bias_fraction */, 4 /* number of standard deviations*/); + + _dst_q_info = q_hint.q_info; + _min_bias = q_hint.bias_min; + _max_bias = q_hint.bias_max; + + // Do not change here as these limits are the natural limits of the associated data types and + // are embedded in the computation of the dst quantization info. + _min_u8 = 0; + _max_u8 = 255; + _min_s8 = -128; + _max_s8 = 127; + } + +public: + using TDecay = typename std::decay<T>::type; + using TBias = typename std::conditional < (std::is_same<TDecay, uint8_t>::value || std::is_same<TDecay, int8_t>::value), int32_t, T >::type; + + void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape, + DataType data_type, ActivationLayerInfo activation_info, bool constant_weights, bool constant_bias, bool weights_reshaped, bool remove_bias = false) + { + _data_type = data_type; + + const bool is_quantized = is_data_type_quantized(data_type); + const DataType bias_data_type = (is_quantized) ? 
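
setup_quantization() above is the heart of the dynamic-quantization mode: scales are drawn as powers of two in [2^-5, 2^3], offsets uniformly over the whole representable range of the element type, and a shape-derived hash perturbs the seed so every test case sees different but reproducible parameters; the destination quantization info and bias bounds then come from the library's suggest_mac_dst_q_info_and_bias() helper and are not re-derived here. A distilled sketch of just the parameter draw:

#include <cmath>
#include <cstdint>
#include <limits>
#include <random>
#include <utility>

// Draw one (scale, offset) pair the way setup_quantization() does:
// a power-of-two scale in [2^-5, 2^3] and an offset anywhere in the
// quantized type's representable range. T is e.g. uint8_t or int8_t.
template <typename T>
std::pair<float, int32_t> draw_q_params(std::mt19937 &gen)
{
    std::uniform_real_distribution<float>  exponent(-5.0f, 3.0f);
    std::uniform_int_distribution<int32_t> offset(std::numeric_limits<T>::min(),
                                                  std::numeric_limits<T>::max());
    return { std::pow(2.0f, exponent(gen)), offset(gen) };
}

int main()
{
    // Shape-derived hash keeps parameters reproducible yet distinct per case,
    // e.g. weights_shape[0] + weights_shape[1] + output_shape[0] + output_shape[1]
    const int    hash = 32 + 16 + 16 + 1;
    std::mt19937 gen(1234u + hash); // library->seed() + _hash in the fixture

    const auto input_q  = draw_q_params<uint8_t>(gen);
    const auto weight_q = draw_q_params<uint8_t>(gen);
    (void)input_q;
    (void)weight_q;
    return 0;
}
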
DataType::S32 : data_type; + + if (is_quantized && (!activation_info.enabled() || activation_info.activation() == ActivationFunction::IDENTITY)) + { + setup_quantization(weights_shape, dst_shape, _src_q_info, _weights_q_info, data_type); + } + else + { + _src_q_info = QuantizationInfo(0.1f, 10); + _dst_q_info = QuantizationInfo(0.3f, 20); + _weights_q_info = QuantizationInfo(0.2f, 5); + } + + // Configure TensorInfo Objects + const TensorInfo src_info(src_shape, 1, data_type, _src_q_info); + const TensorInfo dst_info(dst_shape, 1, data_type, _dst_q_info); + TensorInfo bias_info(bias_shape, 1, bias_data_type); + TensorInfo wei_info(weights_shape, 1, data_type, _weights_q_info); + + if(!constant_weights && weights_reshaped) + { + const TensorShape tr_weights_shape{ weights_shape[1], weights_shape[0] }; + wei_info.set_tensor_shape(tr_weights_shape); + } + wei_info.set_are_values_constant(constant_weights); + bias_info.set_are_values_constant(constant_bias); + + // Initialise Tensors + _src.allocator()->init(src_info); + _weights.allocator()->init(wei_info); + if(!remove_bias) + _bias.allocator()->init(bias_info); + _dst.allocator()->init(dst_info); + + // Configure FC layer and mark the weights as non constant + FullyConnectedLayerInfo fc_info; + fc_info.activation_info = activation_info; + if(!constant_weights) + { + fc_info.are_weights_reshaped = weights_reshaped; + fc_info.transpose_weights = !weights_reshaped; + } + FunctionType fc; + fc.configure(&_src, &_weights, (remove_bias) ? nullptr : &_bias, &_dst, fc_info); + + // Allocate all the tensors + _src.allocator()->allocate(); + _weights.allocator()->allocate(); + if(!remove_bias) + _bias.allocator()->allocate(); + _dst.allocator()->allocate(); + + // Run multiple iterations with different inputs + constexpr int num_iterations = 5; + int randomizer_offset = 0; + + // Create reference tensors + SimpleTensor<T> src{ src_shape, data_type, 1, _src_q_info }; + SimpleTensor<T> weights{ weights_shape, data_type, 1, _weights_q_info }; + SimpleTensor<TBias> bias{ bias_shape, bias_data_type }; + + // Fill weights and/or bias if they remain constant + if(constant_weights) + { + fill(AccessorType(_weights), 1 + _hash); + fill(weights, 1 + _hash); + } + if(constant_bias && !remove_bias) + { + fill(AccessorType(_bias), 2 + _hash); + fill(bias, 2 + _hash); + } + // To remove bias, fill with 0 + if(remove_bias && is_quantized) + { + library->fill_tensor_value(bias, 0); + } + else if(remove_bias) + { + library->fill_tensor_value(bias, (float)0.0); + } + + for(int i = 0; i < num_iterations; ++i) + { + // Run target + { + fill(AccessorType(_src), randomizer_offset); + if(!constant_weights) + { + if(weights_reshaped) + { + fill_transposed_weights(_weights, weights_shape, randomizer_offset + 1 + _hash); + } + else + { + fill(AccessorType(_weights), randomizer_offset + 1 +_hash); + } + } + if(!constant_bias && !remove_bias) + { + fill(AccessorType(_bias), randomizer_offset + 2 + _hash); + } + + fc.run(); + } + + // Run reference and compare + { + // Fill reference + fill(src, randomizer_offset); + if(!constant_weights) + { + fill(weights, randomizer_offset + 1 + _hash); + } + if(!constant_bias && !remove_bias) + { + fill(bias, randomizer_offset + 2 + _hash); + } + + auto dst = reference::activation_layer(reference::fully_connected_layer<T>(src, weights, bias, dst_shape, _dst_q_info), activation_info, _dst_q_info); + + // Validate + validate_with_tolerance(_dst, dst); + } + + randomizer_offset += 100; + } + } + +private: + TensorType _src{}, _weights{}, 
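
The setup() above then drives the actual test: it runs the configured function num_iterations times, refilling every non-constant tensor with a fresh deterministic seed on each pass (randomizer_offset advances by 100 so the per-tensor seed streams never collide), recomputes the reference on identical data, and validates after every iteration. A reduced, runnable model of that loop, with a trivial element-wise product standing in for the fully connected layer:

#include <cassert>
#include <cstddef>
#include <random>
#include <vector>

// Deterministic fill: the same seed always yields the same data, which is
// what lets target and reference operate on identical tensors.
void fill(std::vector<float> &t, int seed)
{
    std::mt19937 gen(static_cast<unsigned int>(seed));
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    for(auto &v : t)
    {
        v = dist(gen);
    }
}

int main()
{
    const int  hash             = 65;    // shape-derived, as in the fixture
    const bool constant_weights = false; // the dynamic-weights case
    std::vector<float> src(8), weights(8), target_dst(8), reference_dst(8);

    for(int i = 0, offset = 0; i < 5; ++i, offset += 100)
    {
        fill(src, offset);
        if(!constant_weights)
        {
            fill(weights, offset + 1 + hash); // refreshed every iteration
        }

        // Stand-ins for fc.run() and the reference computation; both see
        // the same data because the fills above are seed-deterministic.
        for(std::size_t j = 0; j < src.size(); ++j)
        {
            target_dst[j]    = src[j] * weights[j];
            reference_dst[j] = src[j] * weights[j];
        }

        // validate_with_tolerance() in the fixture; exact match suffices here
        for(std::size_t j = 0; j < src.size(); ++j)
        {
            assert(target_dst[j] == reference_dst[j]);
        }
    }
    return 0;
}
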
_bias{}, _dst{}; + DataType _data_type{ DataType::UNKNOWN }; + + QuantizationInfo _src_q_info{}; + QuantizationInfo _weights_q_info{}; + QuantizationInfo _dst_q_info{}; + + // Random initialization limits + // Default values are previously handcrafted limits + // that should be used when we don't use dynamic quantization + int32_t _min_bias{-50}; + int32_t _max_bias{50}; + + int32_t _min_u8{0}; + int32_t _max_u8{30}; + int32_t _min_s8{-15}; + int32_t _max_s8{15}; + int _hash{0}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class FullyConnectedWithDynamicWeightsFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape, + DataType data_type, ActivationLayerInfo activation_info, bool weights_reshaped) + { + FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape, + dst_shape, data_type, activation_info, false, true, weights_reshaped, false); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class FullyConnectedDynamicNoBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape, + DataType data_type, ActivationLayerInfo activation_info, bool weights_reshaped) + { + FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape, + dst_shape, data_type, activation_info, false, true, weights_reshaped, true); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class FullyConnectedWithDynamicBiasFixture : public FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape, + DataType data_type, ActivationLayerInfo activation_info) + { + FullyConnectedWithDynamicTensorsFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, weights_shape, bias_shape, + dst_shape, data_type, activation_info, true, false, false, false); } }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_FULLYCONNECTEDLAYERFIXTURE_H diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h index 552dc7c360..a05e4169a7 100644 --- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h +++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited.
* * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, int class FuseBatchNormalizationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta) { std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta); @@ -96,14 +95,14 @@ protected: FunctionType fuse_batch_normalization; fuse_batch_normalization.configure(&w, &mean, &var, w_fused_to_use, b_fused_to_use, b_to_use, beta_to_use, gamma_to_use, _epsilon, fuse_bn_type); - ARM_COMPUTE_EXPECT(w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(w_fused.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b_fused.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(w_fused.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b_fused.info()->is_resizable()); + ARM_COMPUTE_ASSERT(beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(gamma.info()->is_resizable()); // Allocate tensors w.allocator()->allocate(); @@ -115,14 +114,14 @@ protected: beta.allocator()->allocate(); gamma.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!w_fused.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b_fused.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!var.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!w_fused.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b_fused.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!beta.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!gamma.info()->is_resizable()); // Fill tensors fill(AccessorType(w), 0U, -1.0f, 1.0f); diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h index 500e094e18..94bedc83e1 100644 --- a/tests/validation/fixtures/GEMMFixture.h +++ b/tests/validation/fixtures/GEMMFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_GEMM_FIXTURE -#define ARM_COMPUTE_TEST_GEMM_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H #include "arm_compute/core/KernelDescriptors.h" #include "arm_compute/core/TensorShape.h" @@ -34,6 +34,7 @@ #include "tests/framework/Fixture.h" #include "tests/validation/Helpers.h" #include "tests/validation/reference/ActivationLayer.h" +#include "tests/validation/reference/ElementwiseOperations.h" #include "tests/validation/reference/GEMM.h" #include <random> @@ -44,16 +45,15 @@ namespace test { namespace validation { -template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false> -class GEMMValidationFixture : public framework::Fixture +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false> +class GEMMGenericValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type, bool accumulate=false) { ARM_COMPUTE_UNUSED(pretranspose); - _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type); - _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type); + _target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, accumulate); + _reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type, accumulate); } protected: @@ -80,7 +80,7 @@ protected: } TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, const TensorShape &output_shape, float alpha, float beta, - DataType data_type) + DataType data_type, bool accumulate=false) { // Create tensors TensorType a = create_tensor<TensorType>(shape_a, data_type, 1); @@ -98,12 +98,14 @@ protected: (disable_c) ? nullptr : &c, &dst, alpha, beta, - GEMMInfo(false, false, false, (reinterpret_output_as_3d ? output_shape[2] : 0), reinterpret_input_as_3d, false, GEMMLowpOutputStageInfo(), false, (reinterpret_input_as_3d - || reinterpret_output_as_3d))); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + GEMMInfo(false, false, false, (reinterpret_output_as_3d ? 
output_shape[2] : 0), reinterpret_input_as_3d, false, GEMMLowpOutputStageInfo(), false, false, (reinterpret_input_as_3d + || reinterpret_output_as_3d), arm_compute::ActivationLayerInfo(), false /* fixed_format */, arm_compute::WeightFormat::UNSPECIFIED, false /* pretranspose_B */, accumulate)); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &a, &b, &c, &dst }); // Allocate tensors a.allocator()->allocate(); @@ -111,18 +113,33 @@ protected: c.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(a), 0); fill(AccessorType(b), 1); + if (accumulate) + { + fill(AccessorType(dst), 6); + } if(!disable_c) { fill(AccessorType(c), 2); } + // Run with variable inputs. + if(run_twice) + { + gemm.run(); + fill(AccessorType(a), 3); // Fill tensors with new seed after run + fill(AccessorType(b), 4); + if(!disable_c) + { + fill(AccessorType(c), 5); + } + } // Compute GEMM function gemm.run(); @@ -131,10 +148,9 @@ protected: } SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, float alpha, float beta, - DataType data_type) + DataType data_type, bool accumulate=false) { TensorShape shape_a_to_use = shape_a; - if(reinterpret_input_as_3d) { // Collapse the second and third dimension if the input is 3D @@ -145,6 +161,7 @@ protected: SimpleTensor<T> a{ shape_a_to_use, data_type, 1 }; SimpleTensor<T> b{ shape_b, data_type, 1 }; SimpleTensor<T> c{ output_shape, data_type, 1 }; + SimpleTensor<T> dst{ output_shape, data_type, 1 }; // Fill reference fill(a, 0); @@ -157,27 +174,96 @@ protected: const int m = reinterpret_output_as_3d ? output_shape[1] * output_shape[2] : output_shape[1]; const int batch_size = reinterpret_output_as_3d ? output_shape[3] : output_shape[2]; - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(c.data() + i * n, c.data(), n * sizeof(T)); } } + /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M), + therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K) + in order to be able to call reference implementation that works with (B x M x K) input. + Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. 
*/ + + // Define transposed shapes + TensorShape a_transposed_shape(a.shape().y(), a.shape().x()); + TensorShape b_transposed_shape(b.shape().y(), b.shape().x()); + + // Define transposed tensors + SimpleTensor<T> a_transposed{ a_transposed_shape, data_type }; + SimpleTensor<T> b_transposed{ b_transposed_shape, data_type }; + + // pretranspose a if necessary + if(pretranspose_a) + { + transpose_matrix<T>(a, a_transposed); + } + + // pretranspose b if necessary + if(pretranspose_b) + { + transpose_matrix<T>(b, b_transposed); + } + + // Run with variable inputs. + if(run_twice) + { + reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta); + fill((pretranspose_a) ? a_transposed : a, 3); + fill((pretranspose_b) ? b_transposed : b, 4); + fill(c, 5); + } + + // Do in place summation + if (accumulate) + { + fill(dst, 6); + } + // Setting beta to 0 will effectively disable C for the // computation of the reference: alpha * A * B + 0 * C - return reference::gemm<T>(a, b, c, alpha, disable_c ? 0.f : beta); + // Use transposed tensors if boolean enabled else use original tensors + if (accumulate) + { + reference::gemm_accumulate<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta, dst); + return dst; + } + else + { + return reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, alpha, disable_c ? 0.f : beta); + } } TensorType _target{}; SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false> +class GEMMValidationFixture : protected GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type) + { + GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>::setup(shape_a, shape_b, shape_c, output_shape, alpha, beta, pretranspose, data_type, false /*accumulate*/); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool disable_c = false, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool pretranspose_a = false, bool pretranspose_b = false, bool run_twice = false> +class GEMMAccumulateValidationFixture : protected GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type) + { + bool accumulate = true; + GEMMGenericValidationFixture<TensorType, AccessorType, FunctionType, T, disable_c, reinterpret_input_as_3d, reinterpret_output_as_3d, pretranspose_a, pretranspose_b, run_twice>::setup(shape_a, shape_b, shape_c, output_shape, 
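
To summarise the reference path above: operands that arrived pre-transposed are first transposed back to the canonical (M x K) and (K x N) orientations, then the result is alpha*A*B + beta*C, written directly for gemm() or added onto the existing destination contents for gemm_accumulate(). A plain row-major, single-batch rendering of that contract (purely illustrative, not the library's reference implementation):

#include <vector>

// Row-major reference for one batch: dst = alpha*A*B + beta*C, or with
// accumulate set, dst += alpha*A*B + beta*C (gemm() vs gemm_accumulate()).
void gemm_reference(const std::vector<float> &a, const std::vector<float> &b,
                    const std::vector<float> &c, std::vector<float> &dst,
                    int m, int n, int k, float alpha, float beta, bool accumulate)
{
    for(int i = 0; i < m; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            float acc = 0.f;
            for(int p = 0; p < k; ++p)
            {
                acc += a[i * k + p] * b[p * n + j];
            }
            const float value = alpha * acc + beta * c[i * n + j];
            dst[i * n + j]    = accumulate ? dst[i * n + j] + value : value;
        }
    }
}

// Undo a pretransposed operand: if A arrived as (K x M), transpose it back
// to (M x K) before calling the reference above.
std::vector<float> transpose(const std::vector<float> &src, int rows, int cols)
{
    std::vector<float> out(src.size());
    for(int r = 0; r < rows; ++r)
    {
        for(int c = 0; c < cols; ++c)
        {
            out[c * rows + r] = src[r * cols + c];
        }
    }
    return out;
}
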
alpha, beta, pretranspose, data_type, accumulate); + } +}; + +template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType> class GEMMMatrixMultiplyValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { @@ -224,12 +310,14 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - GEMMFunctionType gemm; - gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info); + GEMMOperatorType gemm; + gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -237,10 +325,10 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -248,7 +336,11 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -276,7 +368,7 @@ protected: if(broadcast_bias) { - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -290,11 +382,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType> class GEMMMatrixMultiply3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { @@ -340,12 +431,14 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - GEMMFunctionType gemm; - gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info); + 
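
This is the first appearance of a pattern that recurs through the rest of this file: the fixtures migrate from stateful functions (configure on tensors, then a no-argument run()) to stateless operators, where configure() sees only ITensorInfo metadata and the concrete tensors are bound at run() time through an ITensorPack keyed by ACL_SRC_0/ACL_SRC_1/ACL_SRC_2/ACL_DST. A toy model of that calling convention, with mock types standing in for the library's:

#include <cassert>
#include <map>

enum TensorId { ACL_SRC_0, ACL_SRC_1, ACL_SRC_2, ACL_DST };

struct Tensor
{
    float value{ 0.f };
};

// Stand-in for ITensorPack: a run-time binding from ids to tensors.
using TensorPack = std::map<TensorId, Tensor *>;

struct MockOperator
{
    // The real operators configure against ITensorInfo metadata only.
    void configure() {}

    // Tensors arrive at run() time, so one configured operator can be
    // reused against any compatible allocation.
    void run(TensorPack &pack)
    {
        Tensor *lhs = pack.at(ACL_SRC_0);
        Tensor *dst = pack.at(ACL_DST);
        dst->value  = lhs->value; // placeholder for the actual kernel
    }
};

int main()
{
    Tensor lhs{ 3.f }, rhs{}, bias{}, dst{};
    MockOperator gemm;
    gemm.configure();
    TensorPack pack{ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_SRC_2, &bias }, { ACL_DST, &dst } };
    gemm.run(pack);
    assert(dst.value == 3.f);
    return 0;
}
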
GEMMOperatorType gemm; + gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -353,10 +446,10 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -364,7 +457,11 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -392,7 +489,7 @@ protected: fill(rhs, 1); fill(bias, 2); - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -405,11 +502,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType> class GEMMMatrixMultiplyInterleavedTransposedValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { @@ -472,16 +568,22 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; - reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info); - - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; + reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), 
lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -491,12 +593,12 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -504,9 +606,15 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -534,7 +642,7 @@ protected: if(broadcast_bias) { - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -548,11 +656,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType> class GEMMMatrixMultiplyInterleavedTransposed3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, float alpha, float beta, unsigned int v0, unsigned int h0, bool broadcast_bias, bool fp16_mixed_precision, const ActivationLayerInfo &act_info, DataType data_type, GPUTarget gpu_arch) { @@ -614,16 +721,22 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; - 
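
The guard just added, and repeated in the fixtures below, skips add_padding_x() whenever the RHS is exported to a CL image: images created on top of a buffer must keep their row pitch at the device's required alignment, so injecting arbitrary x padding would make the export invalid. As a rough illustration, assuming the alignment is the pixel count reported by clGetDeviceInfo(CL_DEVICE_IMAGE_PITCH_ALIGNMENT):

#include <cstddef>

// Row pitch for a buffer-backed image: the row width rounded up to the
// device's pitch alignment.
std::size_t aligned_row_pitch_bytes(std::size_t width_pixels,
                                    std::size_t bytes_per_pixel,
                                    std::size_t pitch_alignment_pixels)
{
    const std::size_t aligned_width =
        ((width_pixels + pitch_alignment_pixels - 1) / pitch_alignment_pixels) * pitch_alignment_pixels;
    return aligned_width * bytes_per_pixel;
}

int main()
{
    // A 150-pixel row with a 64-pixel alignment must be padded to exactly
    // 192 pixels; any other padding on x would violate the restriction.
    return aligned_row_pitch_bytes(150, 4, 64) == 192 * 4 ? 0 : 1;
}
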
reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info); - - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; + reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -633,12 +746,12 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -646,9 +759,15 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -676,7 +795,7 @@ protected: fill(rhs, 1); fill(bias, 2); - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -689,11 +808,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename 
GEMMOperatorType, bool fp_mixed_precision = false> class GEMMMatrixMultiplyReshapedValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, bool interleave_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool broadcast_bias, bool lhs_transpose, const ActivationLayerInfo &act_info) { @@ -768,9 +886,9 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; + ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info)); validate_result = validate_result || !rhs_info.export_to_cl_image; @@ -779,13 +897,19 @@ protected: return nullptr; } - reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -795,12 +919,12 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -808,9 +932,15 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack 
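
Also worth noting in the reshaped fixtures: compute_target() calls the operator's static validate() before configuring, and returns early when the configuration demanded a feature the device cannot provide (the OR with !rhs_info.export_to_cl_image means only configurations that genuinely require cl_image support can trigger the skip). A minimal sketch of that skip logic, with a mock Status in place of the library's:

#include <cstdio>

struct Status
{
    bool ok{ true };
    explicit operator bool() const { return ok; }
};

// Stand-in for a kernel's static validate(): rejects cl_image export on
// devices that cannot support it.
Status validate_reshape(bool device_supports_cl_image, bool export_to_cl_image)
{
    return Status{ !export_to_cl_image || device_supports_cl_image };
}

// Mirrors the fixture's early-out: skip only when validation failed *and*
// the configuration actually asked for the unsupported feature.
bool should_run(bool device_supports_cl_image, bool export_to_cl_image)
{
    bool supported = static_cast<bool>(validate_reshape(device_supports_cl_image, export_to_cl_image));
    supported      = supported || !export_to_cl_image;
    return supported;
}

int main()
{
    if(!should_run(false, true))
    {
        std::puts("cl_image path unsupported on this device - test skipped");
    }
    return 0;
}
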
gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -838,7 +968,7 @@ protected: if(broadcast_bias) { - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -860,11 +990,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType, bool fp_mixed_precision = false> class GEMMMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, bool interleave_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool lhs_transpose, const ActivationLayerInfo &act_info) { @@ -936,9 +1065,9 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; + ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info)); validate_result = validate_result || !rhs_info.export_to_cl_image; @@ -947,13 +1076,19 @@ protected: return nullptr; } - reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -963,12 +1098,12 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - 
ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -976,9 +1111,15 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1006,7 +1147,7 @@ protected: fill(rhs, 1); fill(bias, 2); - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -1027,11 +1168,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType> class GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { @@ -1101,8 +1241,8 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info)); validate_result = validate_result || !rhs_info.export_to_cl_image; @@ -1111,12 +1251,18 @@ protected: return nullptr; } - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, 
&rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -1125,11 +1271,11 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -1137,8 +1283,13 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1166,7 +1317,7 @@ protected: if(broadcast_bias) { - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -1181,11 +1332,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType> class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, bool export_to_cl_image, bool has_pad_y, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info) { @@ -1253,8 +1403,8 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info)); validate_result = validate_result || !rhs_info.export_to_cl_image; @@ -1263,8 +1413,8 @@ protected: return nullptr; } - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); if(has_pad_y) { @@ -1273,9 +1423,15 @@ protected: dst.info()->extend_padding(PaddingSize(2, 0, 1, 0)); } - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), 
framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // We do not pad when using image as it needs to comply to strict pitch alignment restrictions + if(!rhs_info.export_to_cl_image) + { + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst }); + } // Allocate tensors lhs.allocator()->allocate(); @@ -1284,11 +1440,11 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -1296,8 +1452,13 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1325,7 +1486,7 @@ protected: fill(rhs, 1); fill(bias, 2); - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -1339,11 +1500,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType> class GEMMMatrixMultiplyNativeValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) { @@ -1403,12 +1563,14 @@ protected: kernel_info.activation_info = act_info; // Create and configure function - GEMMFunctionType gemm; - gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + GEMMOperatorType gemm; + gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &bias, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -1416,10 +1578,10 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - 
ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -1427,7 +1589,11 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1455,7 +1621,7 @@ protected: if(broadcast_bias) { - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -1469,11 +1635,10 @@ protected: SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType> class GEMMMatrixMultiplyNative3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha, float beta, const ActivationLayerInfo &act_info) { @@ -1532,12 +1697,14 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - GEMMFunctionType gemm; - gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info); + GEMMOperatorType gemm; + gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + add_padding_x({ &lhs, &rhs, &bias, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -1545,10 +1712,10 @@ protected: bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); @@ -1556,7 +1723,11 @@ protected: fill(AccessorType(bias), 2); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1584,7 +1755,7 
@@ protected: fill(rhs, 1); fill(bias, 2); - // In case of broadcast, we need simply copy the first into the following "M" ones + // In case of broadcast, we need to simply copy the first into the following "M" ones for(int i = 1; i < m * batch_size; i++) { memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); @@ -1597,7 +1768,170 @@ protected: SimpleTensor<T> _reference{}; }; +template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType> +class GEMMMatrixMultiplyReshapedOnlyRhsMMULValidationFixture : public framework::Fixture +{ +public: + void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, bool export_to_cl_image, DataType data_type, float alpha, + float beta, bool broadcast_bias, + const ActivationLayerInfo &act_info) + { + GEMMLHSMatrixInfo lhs_info; + lhs_info.m0 = m0; + lhs_info.k0 = k0; + + GEMMRHSMatrixInfo rhs_info; + rhs_info.n0 = n0; + rhs_info.k0 = k0; + rhs_info.interleave = true; + rhs_info.transpose = false; + rhs_info.h0 = 4; + rhs_info.export_to_cl_image = export_to_cl_image; + + // Set the tensor shapes for LHS and RHS matrices + const TensorShape lhs_shape(k, m, batch_size); + const TensorShape rhs_shape(n, k, batch_size); + const TensorShape bias_shape(n, + broadcast_bias ? 1 : m, + broadcast_bias ? 1 : batch_size); + + _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias, act_info); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, beta, broadcast_bias, act_info); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported."); + using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type; + + DistributionType distribution{ T(-1.0f), T(1.0f) }; + library->fill(tensor, distribution, i); + + // Fill border with infinity in order to check the presence of NaN values (i.e. 
inf * 0) + DistributionType distribution_inf{ T(std::numeric_limits<float>::infinity()), T(std::numeric_limits<float>::infinity()) }; + library->fill_borders_with_garbage(tensor, distribution_inf, i); + } + + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + DataType data_type, float alpha, float beta, bool broadcast_bias, const ActivationLayerInfo &act_info) + { + // Create tensors + TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1); + TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1); + TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1); + TensorType rhs_reshaped; + TensorType dst; + + const unsigned int M = lhs_shape[1]; + const unsigned int N = rhs_shape[0]; + const unsigned int K = lhs_shape[0]; + GEMMKernelInfo kernel_info; + kernel_info.m = M; + kernel_info.n = N; + kernel_info.k = K; + kernel_info.depth_output_gemm3d = 0; + kernel_info.reinterpret_input_as_3d = false; + kernel_info.broadcast_bias = broadcast_bias; + kernel_info.activation_info = act_info; + + // Create and configure function + ReshapeRHSOperatorType reshape_rhs; + GEMMOperatorType gemm; + + validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info)); + if(!validate_result) + { + return nullptr; + } + + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + + validate_result = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info)); + if(!validate_result) + { + return nullptr; + } + + gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // Allocate tensors + lhs.allocator()->allocate(); + rhs.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(lhs), 0); + fill(AccessorType(rhs), 1); + fill(AccessorType(bias), 2); + + // Compute GEMM + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, + { ACL_SRC_1, &rhs_reshaped }, + { ACL_SRC_2, &bias }, + { ACL_DST, &dst } }); + gemm.run(gemm_pack); + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, float beta, bool broadcast_bias, + const ActivationLayerInfo &act_info) + { + if(!validate_result) + return SimpleTensor<T>(); + + TensorShape dst_shape = lhs_shape; + dst_shape[0] = rhs_shape[0]; + dst_shape[1] = lhs_shape[1]; + + // Create reference + SimpleTensor<T> lhs{ lhs_shape, data_type, 1 }; + SimpleTensor<T> rhs{ rhs_shape, data_type, 1 }; + SimpleTensor<T> bias{ dst_shape, data_type, 1 }; + + const int n = rhs_shape[0]; + const int m = lhs_shape[1]; + const int batch_size = lhs_shape[2]; + + // Fill reference + fill(lhs, 0); + fill(rhs, 1); + fill(bias, 2); + + 
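The fill() overload above writes +inf into the tensor borders on purpose: any kernel that wrongly reads the padded region poisons its accumulator (inf * 0 = NaN) and the mismatch surfaces in validation. A minimal standalone sketch of that idea, using only the standard library (the buffer layout and names are illustrative, not the ACL helpers):

#include <cmath>
#include <cstdio>
#include <limits>
#include <random>
#include <vector>

int main()
{
    const int valid = 8, pad = 2;
    std::vector<float> row(valid + pad, 0.0f);

    // Interior gets ordinary random data, padding gets +inf.
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    for(int i = 0; i < valid; ++i) row[i] = dist(gen);
    for(int i = valid; i < valid + pad; ++i) row[i] = std::numeric_limits<float>::infinity();

    // A zero weight turns a stray padding read into inf * 0 = NaN.
    auto dot = [&](int len) {
        float acc = 0.0f;
        for(int i = 0; i < len; ++i) acc += row[i] * 0.0f;
        return acc;
    };
    std::printf("correct: %f, buggy: %s\n", dot(valid), std::isnan(dot(valid + pad)) ? "NaN" : "finite");
    return 0;
}

Any non-finite sentinel would do; infinity is convenient because both inf * 0 and inf - inf produce NaN, so a single stray multiply-accumulate cannot cancel out.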
if(broadcast_bias) + { + // In case of broadcast, we need to simply copy the first into the following "M" ones + for(int i = 1; i < m * batch_size; i++) + { + memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); + } + } + + return reference::activation_layer(reference::gemm<T>(lhs, rhs, bias, alpha, beta), act_info); + } + + bool validate_result = true; + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_GEMM_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_GEMMFIXTURE_H diff --git a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h index 1ce0eafead..59fc460869 100644 --- a/tests/validation/fixtures/GEMMInterleave4x4Fixture.h +++ b/tests/validation/fixtures/GEMMInterleave4x4Fixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class GEMMInterleave4x4ValidationFixture : public framework::Fixture { public: - template <typename...> void setup(size_t x, size_t y, DataType data_type) { _data_type = data_type; @@ -88,24 +87,25 @@ protected: // Create and configure function FunctionType f; - f.configure(&a, &b); + f.configure(a.info(), b.info()); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensors fill(AccessorType(a), 0); fill(AccessorType(b), 0); - // Compute GEMM function - f.run(); + // Compute GEMM interleave kernel + ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } }; + f.run(tensors); return b; } diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h deleted file mode 100644 index e9ec1bc365..0000000000 --- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2017-2019 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE -#define ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/Helpers.h" -#include "tests/validation/reference/GEMMLowp.h" - -#include <random> - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template <typename TensorType, typename AccessorType, typename FunctionType, typename T2> -class GEMMLowpAssemblyFixture : public framework::Fixture -{ -public: - template <typename...> - void setup(size_t m, size_t n, size_t k) - { - const TensorShape shape_a(k, m); - const TensorShape shape_b(n, k); - const TensorShape shape_c(n, m); - _target = compute_target(shape_a, shape_b, shape_c); - _reference = compute_reference(shape_a, shape_b, shape_c); - } - -protected: - template <typename U> - void fill(U &&tensor, int i, int lo, int hi) - { - std::uniform_int_distribution<> distribution(lo, hi); - library->fill(tensor, distribution, i); - } - - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) - { - DataType dt_in = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8; - - // Create tensors - TensorType a = create_tensor<TensorType>(shape_a, dt_in, 1); - TensorType b = create_tensor<TensorType>(shape_b, dt_in, 1); - TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1); - - // Create and configure function - FunctionType gemmlowp; - gemmlowp.configure(&a, &b, nullptr, &c); - - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - a.allocator()->allocate(); - b.allocator()->allocate(); - c.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - if(dt_in == DataType::S8) - { - fill(AccessorType(a), 0, -128, 127); - fill(AccessorType(b), 1, -128, 127); - } - else - { - fill(AccessorType(a), 0, 0, 255); - fill(AccessorType(b), 1, 0, 255); - } - fill(AccessorType(c), 2, 0, 0); - - // Compute GEMM function - gemmlowp.run(); - return c; - } - - SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) - { - DataType dt = std::is_same<T2, int8_t>::value ? 
DataType::S8 : DataType::U8; - - // Create reference - SimpleTensor<T2> a{ shape_a, dt, 1 }; - SimpleTensor<T2> b{ shape_b, dt, 1 }; - - // Fill reference - if(dt == DataType::S8) - { - fill(a, 0, -128, 127); - fill(b, 1, -128, 127); - } - else - { - fill(a, 0, 0, 255); - fill(b, 1, 0, 255); - } - - return reference::gemmlowp<int32_t, T2>(a, b, shape_c); - } - - TensorType _target{}; - SimpleTensor<int32_t> _reference{}; -}; - -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */ diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index 95f49601a5..aa4eedb75d 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,22 +21,20 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE -#define ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H -#include "arm_compute/core/KernelDescriptors.h" -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" +#include "src/core/utils/quantization/AsymmHelpers.h" #include "tests/validation/Helpers.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Validation.h" #include "tests/validation/reference/GEMMLowp.h" +#include "tests/validation/reference/ArithmeticOperations.h" +#include "tests/validation/reference/DequantizationLayer.h" -#include <random> +#include <cstdint> +#include <vector> namespace arm_compute { @@ -49,119 +47,145 @@ namespace template <typename U> void fill(U &&tensor, int i) { - switch(tensor.data_type()) - { - case DataType::QSYMM8_PER_CHANNEL: - { - int min_bound = 128; - int max_bound = -127; - for(size_t j = 0; j < tensor.quantization_info().scale().size(); j++) - { - std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i); - if(bounds.first < min_bound) - { - min_bound = bounds.first; - } - if(bounds.second > max_bound) - { - max_bound = bounds.second; - } - } - std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound); - library->fill(tensor, distribution, i); - break; - } - case DataType::QASYMM8: - { - std::uniform_int_distribution<uint8_t> distribution(1, 254); - library->fill(tensor, distribution, i); - break; - } - case DataType::F16: - { - arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; - library->fill(tensor, distribution, i); - break; - } - case DataType::F32: - { - std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); - library->fill(tensor, distribution, i); - break; - } - default: - library->fill_tensor_uniform(tensor, i); - } + library->fill_tensor_uniform(tensor, i); } -template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false> -TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const 
TensorShape &shape_output, int32_t a_offset, int32_t b_offset, - GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, - QuantizationInfo b_qinfo = QuantizationInfo()) +template <typename U> +void fill_quantized(U &&tensor, int i) { - // Create tensors - DataType data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a; - - TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1); - TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1); // gemm output before output stage mismatch if i pass data_layout_output here. to be investigated - TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1); - - a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset)); + ARM_COMPUTE_ASSERT(is_data_type_quantized(tensor.data_type())); + library->fill_tensor_uniform(tensor, i); +} - if(data_type_b == DataType::QSYMM8_PER_CHANNEL) +template <typename U> +void fill(U &&tensor, int i, int32_t min, int32_t max) +{ + if (tensor.data_type() == DataType::S32) { + std::uniform_int_distribution<int32_t> distribution(min, max); + library->fill(tensor, distribution, i); + } + else if(tensor.data_type() == DataType::F32) { - b.info()->set_quantization_info(b_qinfo); + std::uniform_real_distribution<float> distribution((float)min, (float)max); + library->fill(tensor, distribution, i); } else { - b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset)); + ARM_COMPUTE_ERROR("NOT SUPPORTED!"); + } +} + +/** Information about how to fill tensors */ +struct TensorFillInfo +{ + // Bias fill range. Default values are arbitrary + int32_t min_bias {-20000}; + int32_t max_bias {20000}; + + // Output fill range. Default values are arbitrary + int32_t min_output {-20000}; + int32_t max_output {20000}; + + // Optional extra hash to randomize tensor filling + int32_t hash {0}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false, bool run_twice = false> +TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, + const QuantizationInfo& output_qinfo, DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, + GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo(), + bool accumulate = false, bool dynamic_qinfo = false, DataType data_type_output = DataType::UNKNOWN) +{ + ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a)); + ARM_COMPUTE_ASSERT(data_type_a == data_type_b); + // If unknown, set to sensible defaults + if (data_type_output == DataType::UNKNOWN) { + data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a; } + + // Create tensors + TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : a_qinfo); + TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : b_qinfo); // gemm output before output stage mismatch if i pass data_layout_output here. 
to be investigated + TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1, output_qinfo /* output_qinfo will be ignored when output stage type is None */); + TensorType bias; if(is_fused) { TensorShape bias_shape(shape_b[0]); - bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1); + bias = create_tensor<TensorType>(bias_shape,data_type_output == DataType::F32 ? DataType::F32 : DataType::S32, 1); } // Create and configure function // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output FunctionType gemmlowp; - // TODO (COMPMID-1672) - Extending the test to validate add bias in offset contribution - gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, output_stage)); + gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, reshape_b_only_on_first_run, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false, + output_stage, false /*fp_mixed_precision*/, false /*fast_math*/, false /*broadcast_bias*/, + arm_compute::ActivationLayerInfo(), false /* fixed_format */, arm_compute::WeightFormat::UNSPECIFIED, + false /* pretranspose_B */, accumulate)); + + // If the QuantizationInfo is dynamic, it needs to be settable after configure (note that we also force it to be dynamic) + if (dynamic_qinfo) + { + a.info()->set_quantization_info(QuantizationInfo(a_qinfo.scale(), a_qinfo.offset(), true)); + b.info()->set_quantization_info(QuantizationInfo(b_qinfo.scale(), b_qinfo.offset(), true)); + } + + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &a, &b, &output }); // Allocate tensors a.allocator()->allocate(); b.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors - fill(AccessorType(a), 0); - fill(AccessorType(b), 1); + fill_quantized(AccessorType(a), 0 + finfo.hash); + fill_quantized(AccessorType(b), 1 + finfo.hash); + + if (accumulate) + { + ARM_COMPUTE_ASSERT(accumulate != run_twice); + fill(AccessorType(output), 6 + finfo.hash, finfo.min_output, finfo.max_output); + } if(is_fused) { - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); bias.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - fill(AccessorType(bias), 2); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + fill(AccessorType(bias), 2 + finfo.hash, finfo.min_bias, finfo.max_bias); } + + // Run with variable inputs. 
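When run_twice is set, the block below runs the configured function once, refills every tensor with new seed offsets (3, 4 and 5 plus TensorFillInfo::hash, instead of 0, 1 and 2), and runs again, so any state cached on the first run (for example under reshape_b_only_on_first_run) shows up as a mismatch against the reference. A small sketch of the seeding scheme, assuming only the standard library (make_data and the shape-derived hash value are illustrative):

#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

// seed = run/tensor offset + shape-derived hash: reproducible, yet distinct
// per tensor, per run and per test configuration.
std::vector<int8_t> make_data(std::size_t n, std::uint32_t seed)
{
    std::mt19937 gen(seed);
    std::uniform_int_distribution<int32_t> dist(-128, 127); // int8_t is not a valid distribution type
    std::vector<int8_t> v(n);
    for(auto &x : v) x = static_cast<int8_t>(dist(gen));
    return v;
}

int main()
{
    const std::uint32_t hash = 16 + 8 + 4 + 2; // cf. TensorFillInfo::hash = sum of shape dimensions
    auto a_run1 = make_data(64, 0 + hash);
    auto b_run1 = make_data(64, 1 + hash);
    auto a_run2 = make_data(64, 3 + hash); // fresh data for the second run
    auto b_run2 = make_data(64, 4 + hash);
    (void)a_run1; (void)b_run1; (void)a_run2; (void)b_run2;
    return 0;
}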
+ if(run_twice) + { + gemmlowp.run(); + fill_quantized(AccessorType(a), 3 + finfo.hash); // Fill tensors with new seed after run + fill_quantized(AccessorType(b), 4 + finfo.hash); + if(is_fused) + { + fill(AccessorType(bias), 5 + finfo.hash, finfo.min_bias, finfo.max_bias); + } + } + // Compute GEMM function gemmlowp.run(); return output; } -template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t> -SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, - DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, QuantizationInfo b_qinfo = QuantizationInfo()) +template <bool reinterpret_input_as_3d, typename TI = uint8_t, typename TW = uint8_t, bool pretranspose_A = false, bool pretranspose_B = false, bool run_twice = false> +SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, + DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, const TensorFillInfo& finfo = TensorFillInfo()) { + ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a)); + ARM_COMPUTE_ASSERT(data_type_a == data_type_b); TensorShape shape_a_to_use = shape_a; if(reinterpret_input_as_3d) { @@ -170,101 +194,269 @@ SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, con } // Create reference - SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1 }; - SimpleTensor<TW> b{ shape_b, data_type_b, 1, data_type_b == DataType::QSYMM8_PER_CHANNEL ? b_qinfo : QuantizationInfo(1.0f / 255, b_offset) }; + SimpleTensor<TI> a{ shape_a_to_use, data_type_a, 1, a_qinfo }; + SimpleTensor<TW> b{ shape_b, data_type_b, 1, b_qinfo }; + + TensorShape shape_a_to_use_transposed{ shape_a_to_use }; + TensorShape shape_b_transposed{ shape_b }; + + shape_a_to_use_transposed.set(0, shape_a_to_use[1]); + shape_a_to_use_transposed.set(1, shape_a_to_use[0]); + shape_b_transposed.set(0, shape_b[1]); + shape_b_transposed.set(1, shape_b[0]); + + SimpleTensor<TI> a_transposed{ shape_a_to_use_transposed, data_type_a, 1, a_qinfo }; + SimpleTensor<TW> b_transposed{ shape_b_transposed, data_type_b, 1, b_qinfo }; // Fill reference - fill(a, 0); - fill(b, 1); - return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>(a, b, shape_output, a_offset, b_offset); -} + fill_quantized(a, 0 + finfo.hash); + fill_quantized(b, 1 + finfo.hash); + + // Transpose reference if required + /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M), + therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K) + in order to be able to call reference implementation that works with (B x M x K) input. + Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */ + if(pretranspose_A) + { + transpose_matrix<TI>(a, a_transposed); + } + + if(pretranspose_B) + { + transpose_matrix<TW>(b, b_transposed); + } + + // Run with variable inputs. + const int32_t a_offset = a_qinfo.uniform().offset; + const int32_t b_offset = b_qinfo.uniform().offset; + + if(run_twice) + { + reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>((pretranspose_A ? 
a_transposed : a), (pretranspose_B ? b_transposed : b), shape_output, a_offset, b_offset); + fill_quantized((pretranspose_A) ? a_transposed : a, 3 + finfo.hash); + fill_quantized((pretranspose_B) ? b_transposed : b, 4 + finfo.hash); + } + + return reference::gemmlowp_matrix_multiply_core<int32_t, TI, TW>((pretranspose_A ? a_transposed : a), (pretranspose_B ? b_transposed : b), shape_output, a_offset, b_offset); } +} // namespace -template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false> -class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false> +class GEMMLowpGenericMatrixMultiplyCoreValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate=false, bool dynamic_qinfo = false) { - _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset); - _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset); + const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset); + const auto b_qinfo = QuantizationInfo(1.0f / 255, b_offset); + TensorFillInfo finfo; + _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo); + _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate); } protected: - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset) + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo) { - return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t>(shape_a, shape_b, shape_output, a_offset, b_offset); + const auto output_qinfo = QuantizationInfo(); // No output stage + return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8, DataType::QASYMM8, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo); } - SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset) + SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate) { - return compute_gemmlowp_reference<reinterpret_input_as_3d>(shape_a, shape_b, shape_output, a_offset, b_offset); + SimpleTensor<int32_t> ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, uint8_t, uint8_t, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, + DataType::QASYMM8, DataType::QASYMM8, finfo); + + if 
(accumulate) + { + SimpleTensor<int32_t> output{ shape_output, DataType::S32, 1 }; + fill(output, 6 + finfo.hash, finfo.min_output, finfo.max_output); + reference::arithmetic_operation<int32_t>(reference::ArithmeticOperation::ADD, output, ref_output, output, ConvertPolicy::SATURATE); + return output; + } + + return ref_output; } TensorType _target{}; SimpleTensor<int32_t> _reference{}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t> -class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false> +class GEMMLowpMatrixMultiplyCoreValidationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice> { public: - template <typename...> - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, DataType data_type_b) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset) { - ARM_COMPUTE_EXPECT(output_stage.type != GEMMLowpOutputStageType::NONE, framework::LogLevel::ERRORS); - DataType data_type_a = data_type_b == DataType::QASYMM8_SIGNED ? DataType::QASYMM8_SIGNED : DataType::QASYMM8; + GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, false /* accumulate */); + } +}; - if(data_type_b == DataType::QSYMM8_PER_CHANNEL) - { - output_stage.is_quantized_per_channel = true; - const size_t num_channels = shape_b[0]; - std::vector<float> scales(num_channels); - std::uniform_real_distribution<float> distribution(0.f, 1.f); - library->fill(scales, distribution, 0); - output_stage.gemmlowp_multipliers.resize(num_channels); - output_stage.gemmlowp_shifts.resize(num_channels); - for(size_t i = 0; i < num_channels; ++i) - { - quantization::calculate_quantized_multiplier(scales[i], &output_stage.gemmlowp_multipliers[i], &output_stage.gemmlowp_shifts[i]); - } +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false> +class GEMMLowpMatrixMultiplyAccumulateValidationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset) + { + GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, true /* accumulate */); + } +}; - _reference = compute_reference(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales)); - _target = compute_target(shape_a, shape_b, shape_output, a_offset, 0, output_stage, data_type_a, data_type_b, QuantizationInfo(scales)); - } - else - { - 
_reference = compute_reference(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo()); - _target = compute_target(shape_a, shape_b, shape_output, a_offset, b_offset, output_stage, data_type_a, data_type_b, QuantizationInfo()); - } } +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false> +class GEMMLowpMatrixMultiplyCoreDynamicQuantizationFixture : protected GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset) + { + GEMMLowpGenericMatrixMultiplyCoreValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, run_twice>::setup(shape_a, shape_b, shape_output, a_offset, b_offset, false /* accumulate */, true /* dynamic_qinfo */); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false> +class GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public framework::Fixture +{ +public: + /** Dynamically initialize the quantization info with saturation awareness + */ + template <typename T> + static void setup_quantization(DataType data_type, const TensorShape& shape_a, const TensorShape& shape_b, QuantizationInfo& a_qinfo, QuantizationInfo& b_qinfo, QuantizationInfo& output_qinfo, TensorFillInfo& finfo) + { + // This hash is used by random generators. There may be hash collisions but + // this is intentional as it's a very easy way to make the current + // random generation process produce different values for many test configurations, + // which were using the same set of values before. + finfo.hash = shape_a[0] + shape_a[1] + shape_b[0] + shape_b[1]; + + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + finfo.hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + a_qinfo = QuantizationInfo(scale_lhs, offset_lhs); + b_qinfo = QuantizationInfo(scale_rhs, offset_rhs); + + // reinterpret_input_as_3d or reinterpret_output_as_3d can be ignored, as the underlying gemm / matmul computation + // is equivalent to a standard 2D one with m-n-k dimensions + const int m = shape_a.y(); + const int n = shape_b.x(); + const int k = shape_a.x(); + + const float bias_fraction = 0.5f; // We enabled is_fused in compute_gemmlowp_target below, thus bias is included + + QuantizationHint q_hint = suggest_matmul_dst_q_info_and_bias(a_qinfo, b_qinfo, m, n, k, data_type, bias_fraction); + output_qinfo = q_hint.q_info; + finfo.min_bias = q_hint.bias_min; + finfo.max_bias = q_hint.bias_max; + + // Both target and reference implementations use negated offsets, i.e. 
+ // float_val = (int_val + offset) * scale + // instead of + // float_val = (int_val - offset) * scale + // as usual. Therefore, after calculating the output quantization above, we + // negate the offsets of inputs' offsets. + a_qinfo = QuantizationInfo(scale_lhs, -offset_lhs); + b_qinfo = QuantizationInfo(scale_rhs, -offset_rhs); + } + + /** Initialize output stage info from quantization info */ + static Status init_gemmlowp_output_stage_info( + DataType data_type, + const QuantizationInfo& a_qinfo, + const QuantizationInfo& b_qinfo, + const QuantizationInfo& output_qinfo, + GEMMLowpOutputStageType type, + GEMMLowpOutputStageInfo &gemmlowp_output_stage_info) + { + ARM_COMPUTE_RETURN_ERROR_ON(!is_data_type_quantized_asymmetric(data_type)); + + const UniformQuantizationInfo aq_unif = a_qinfo.uniform(); + const UniformQuantizationInfo bq_unif = b_qinfo.uniform(); + const UniformQuantizationInfo oq_unif = output_qinfo.uniform(); + + float multiplier = (aq_unif.scale * bq_unif.scale) / oq_unif.scale; + int32_t int_multiplier; + int32_t shift; + + ARM_COMPUTE_RETURN_ON_ERROR( + quantization::calculate_quantized_multiplier(multiplier, &int_multiplier, &shift)); + + int32_t type_min = 0; + int32_t type_max = 0; + std::tie(type_min, type_max) = quantization::get_quantized_asymmetric_output_min_max(output_qinfo, ActivationLayerInfo(), data_type); + + gemmlowp_output_stage_info.gemmlowp_real_multiplier = multiplier; + gemmlowp_output_stage_info.gemmlowp_multiplier = int_multiplier; + gemmlowp_output_stage_info.gemmlowp_multipliers = { int_multiplier }; + gemmlowp_output_stage_info.gemmlowp_shift = shift; + gemmlowp_output_stage_info.gemmlowp_shifts = { shift }; + gemmlowp_output_stage_info.gemmlowp_offset = oq_unif.offset; + gemmlowp_output_stage_info.type = type; + gemmlowp_output_stage_info.gemmlowp_min_bound = type_min; + gemmlowp_output_stage_info.gemmlowp_max_bound = type_max; + + return Status{}; + } + + /** Currently this fixture only tests the following data type configurations: + * + * 1. a and b are of the same data type + * 2. 
The data type is quantized asymmetric + * + */ + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type, + bool reshape_b_only_on_first_run) + { + ARM_COMPUTE_ASSERT(output_stage_type != GEMMLowpOutputStageType::NONE); + ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type)); + + // Randomized dynamic quantization: randomize quantization info in a way that ensures no result saturation + // most of the time + QuantizationInfo a_qinfo; + QuantizationInfo b_qinfo; + QuantizationInfo output_qinfo; + TensorFillInfo finfo; + setup_quantization<TI>(data_type, shape_a, shape_b, a_qinfo, b_qinfo, output_qinfo, finfo); + + GEMMLowpOutputStageInfo output_stage; + init_gemmlowp_output_stage_info(data_type, a_qinfo, b_qinfo, output_qinfo, output_stage_type, output_stage); + + _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type, data_type, output_stage, finfo); + _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, data_type, data_type, output_stage, reshape_b_only_on_first_run, finfo); } protected: - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage, - DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo) + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const QuantizationInfo& output_qinfo, + DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo()) { - return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true>(shape_a, shape_b, shape_output, a_offset, b_offset, - output_stage, data_type_a, data_type_b, b_qinfo); + return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true, run_twice>(shape_a, shape_b, shape_output, a_qinfo, + b_qinfo, output_qinfo, data_type_a, data_type_b, output_stage, reshape_b_only_on_first_run, finfo); } - SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, int32_t a_offset, int32_t b_offset, - GEMMLowpOutputStageInfo output_stage, DataType data_type_a, DataType data_type_b, QuantizationInfo b_qinfo) + SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, + DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, const TensorFillInfo& finfo = TensorFillInfo()) { - SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW>(shape_a, shape_b, shape_output, a_offset, b_offset, data_type_a, data_type_b, b_qinfo); + SimpleTensor<int32_t> output = compute_gemmlowp_reference<reinterpret_input_as_3d, TI, TW, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo); TensorShape bias_shape(shape_b[0]); SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 }; - fill(bias, 2); + (run_twice) ? 
fill(bias, 5 + finfo.hash, finfo.min_bias, finfo.max_bias) : fill(bias, 2 + finfo.hash, finfo.min_bias, finfo.max_bias); // Fill bias with same seed as last run of gemmlowp_target switch(output_stage.type) { case GEMMLowpOutputStageType::QUANTIZE_DOWN: - return reference::gemmlowp_quantize_down_scale<int32_t, TW>(output, bias, + return reference::gemmlowp_quantize_down_scale<int32_t, TI>(output, bias, output_stage.gemmlowp_offset, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound); break; case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT: - return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TW>(output, bias, + return reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TI>(output, bias, output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound); break; default: @@ -276,11 +468,78 @@ protected: SimpleTensor<TI> _reference{}; }; +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, bool run_twice = false> +class GEMMLowpDequantizedMatrixMultiplyValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate) + { + const bool dynamic_qinfo = false; + const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset); + const auto b_qinfo = QuantizationInfo(5.0f / 255, b_offset); + TensorFillInfo finfo; + _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo); + _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo); + } + +protected: + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo) + { + const auto output_qinfo = QuantizationInfo(); + return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo, DataType::F32); + } + + SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate, const bool dynamic_qinfo) + { + QuantizationInfo s32_ref_output_quant_info = QuantizationInfo(a_qinfo.uniform().scale * b_qinfo.uniform().scale, 0, dynamic_qinfo); + + SimpleTensor<int32_t> s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, int8_t, int8_t, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, + DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, finfo); + s32_ref_output.quantization_info(s32_ref_output_quant_info); + + SimpleTensor<float> f32_ref_output(s32_ref_output.shape(), DataType::F32); + f32_ref_output = reference::dequantization_layer<float, int32_t>(s32_ref_output); + + if (accumulate) + { + SimpleTensor<float> output{ shape_output, DataType::F32, 1 }; + 
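Above, the reference runs the integer GEMM to S32 and dequantizes it with the product of the input scales, f32 = s32 * (a_scale * b_scale); the accumulate branch then pre-fills the F32 destination declared above (continued on the next line) and adds the dequantized result into it. A minimal sketch of just the dequantization step, using only the standard library and illustrative values:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Scales mirror the fixture above: a_qinfo = 1/255, b_qinfo = 5/255.
    const float a_scale = 1.0f / 255, b_scale = 5.0f / 255;
    std::vector<int32_t> s32_out{ 1000, -2000, 300 }; // raw integer GEMM results

    std::vector<float> f32_out(s32_out.size());
    for(std::size_t i = 0; i < s32_out.size(); ++i)
        f32_out[i] = static_cast<float>(s32_out[i]) * (a_scale * b_scale);

    std::printf("%f %f %f\n", f32_out[0], f32_out[1], f32_out[2]);
    return 0;
}

The combined scale with no offset is consistent with s32_ref_output being assigned a zero-offset QuantizationInfo above: the input offsets are already folded into the S32 accumulation by the gemmlowp core.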
fill(output, 6 + finfo.hash, finfo.min_output, finfo.max_output); + reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, output, f32_ref_output, output, ConvertPolicy::SATURATE); + return output; + } + + return f32_ref_output; + } + + TensorType _target{}; + SimpleTensor<float> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false> +class GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture : public GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type, bool reshape_b_only_on_first_run) + { + GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>::setup(shape_a, shape_b, + shape_output, output_stage_type, data_type, reshape_b_only_on_first_run); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false, typename TI = uint8_t, typename TW = uint8_t, bool run_twice = false> +class GEMMLowpBatchedMatrixMultiplyCoreFusedOffsetOutputFixture : public GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type, bool reshape_b_only_on_first_run) + { + GEMMLowpGenericMatrixMultiplyCoreFusedOffsetOutputValidationFixture<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, TI, TW, run_twice>::setup(shape_a, shape_b, shape_output, output_stage_type, data_type, reshape_b_only_on_first_run); + } +}; + template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias) { _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias); @@ -316,27 +575,27 @@ protected: output_stage_info.output_data_type = DataType::QASYMM8; output_stage.configure(&a, add_bias ? 
&b : nullptr, &c, output_stage_info); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -382,7 +641,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max, bool add_bias) { _target = compute_target(shape, result_offset, result_mult_int, result_shift, min, max, add_bias); @@ -418,27 +676,27 @@ protected: output_stage_info.output_data_type = DataType::QASYMM8_SIGNED; output_stage.configure(&a, add_bias ? &b : nullptr, &c, output_stage_info); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -484,7 +742,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias) { _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias); @@ -512,27 +769,27 @@ protected: FunctionType output_stage; output_stage.configure(&a, add_bias ? 
&b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -579,7 +836,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max, bool add_bias) { _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias); @@ -607,27 +863,27 @@ protected: FunctionType output_stage; output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -674,7 +930,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture : public framework::Fixture { public: - template <typename...> void setup(DataType data_type, TensorShape shape, float result_real_multiplier, int32_t result_offset, int32_t min, int32_t max, bool add_bias) { _target = compute_target(data_type, shape, result_real_multiplier, result_offset, min, max, add_bias); @@ -712,27 +967,27 @@ protected: FunctionType output_stage; output_stage.configure(&a, add_bias ? 
&b : nullptr, &c, info); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -777,7 +1032,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType> class GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min, int32_t max, bool add_bias) { _target = compute_target(shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias); @@ -805,27 +1059,27 @@ protected: FunctionType output_stage; output_stage.configure(&a, add_bias ? &b : nullptr, &c, result_fixedpoint_multiplier, result_shift, min, max); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(c.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); c.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!c.info()->is_resizable()); // Fill tensor fill(AccessorType(a), 0); if(add_bias) { - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate bias tensor b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensor fill(AccessorType(b), 1); @@ -868,11 +1122,10 @@ protected: SimpleTensor<int16_t> _reference{}; }; -template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, bool interleave_rhs, DataType data_type) { @@ -938,15 +1191,17 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; + 
ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; GEMMFunctionType gemm; - reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); + reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -955,20 +1210,23 @@ protected: rhs_reshaped.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1014,11 +1272,10 @@ protected: SimpleTensor<int32_t> _reference{}; }; -template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, bool interleave_rhs, DataType data_type) { @@ -1088,15 +1345,17 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeLHSFunctionType reshape_lhs; - ReshapeRHSFunctionType reshape_rhs; + ReshapeLHSOperatorType reshape_lhs; + ReshapeRHSOperatorType reshape_rhs; GEMMFunctionType gemm; - reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h)); + 
reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h)); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -1105,20 +1364,23 @@ protected: rhs_reshaped.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!lhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - reshape_lhs.run(); - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } }; + reshape_lhs.run(reshape_lhs_pack); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1166,11 +1428,10 @@ protected: SimpleTensor<int32_t> _reference{}; }; -template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type) { @@ -1239,13 +1500,15 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeRHSFunctionType reshape_rhs; + ReshapeRHSOperatorType reshape_rhs; GEMMFunctionType gemm; - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -1253,18 +1516,20 @@ protected: rhs_reshaped.allocator()->allocate(); 
dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1305,11 +1570,372 @@ protected: SimpleTensor<int32_t> _reference{}; }; -template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType> +template <typename T, typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType, typename ReduceOperation, typename CastOperation> +class GEMMLowpMatrixMultiplyReshapedOnlyRHSMMULOutputStageValidationFixture : public framework::Fixture +{ +public: + void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, + unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, bool broadcast_bias, DataType data_type) + { + GEMMLowpOutputStageInfo output_stage; + output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT; + output_stage.output_data_type = data_type; + output_stage.gemmlowp_multipliers = std::vector<int32_t> { 1 }; + output_stage.gemmlowp_shifts = std::vector<int32_t> { 1 }; + output_stage.gemmlowp_multipliers[0] = 1; + output_stage.gemmlowp_shifts[0] = 1; + output_stage.gemmlowp_offset = 0; + constexpr float scale = 0.001f; + quantization::calculate_quantized_multiplier(scale, &output_stage.gemmlowp_multipliers[0], &output_stage.gemmlowp_shifts[0]); + output_stage.gemmlowp_min_bound = -100; + output_stage.gemmlowp_max_bound = 100; + + GEMMLHSMatrixInfo lhs_info; + lhs_info.m0 = m0; + lhs_info.k0 = k0; + + GEMMRHSMatrixInfo rhs_info; + rhs_info.n0 = n0; + rhs_info.k0 = k0; + rhs_info.h0 = h0; + rhs_info.interleave = interleave_rhs; + rhs_info.transpose = transpose_rhs; + + int a_offset = 1; + int b_offset = 1; + + // Set the tensor shapes for LHS and RHS matrices + const TensorShape lhs_shape(k, m, batch_size); + const TensorShape rhs_shape(n, k, batch_size); + const TensorShape bias_shape(n, + broadcast_bias ? 1 : m, + broadcast_bias ? 
1 : batch_size); + + _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, output_stage, a_offset, b_offset); + if(gemm_validated == true) + { + _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, output_stage, a_offset, b_offset); + } + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::QASYMM8: + { + // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path + std::uniform_int_distribution<> distribution(1, 254); + library->fill(tensor, distribution, i); + } + break; + case DataType::QASYMM8_SIGNED: + { + std::uniform_int_distribution<> distribution(-127, 126); + library->fill(tensor, distribution, i); + } + break; + case DataType::S32: + { + std::uniform_int_distribution<> distribution(-10000, 10000); + library->fill(tensor, distribution, i); + } + break; + default: + ARM_COMPUTE_ERROR("Unsupported data type"); + } + } + + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, DataType data_type, GEMMLowpOutputStageInfo output_stage, const int a_offset, const int b_offset) + { + // Create tensors + TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, a_offset)); + TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, b_offset)); + TensorType bias = create_tensor<TensorType>(bias_shape, DataType::S32, 1); + TensorType dst; + TensorType rhs_reshaped; + + const unsigned int M = lhs_shape[1]; + const unsigned int N = rhs_shape[0]; + const unsigned int K = lhs_shape[0]; + + // Tensors for precomputing sum of lhs rows / rhs columns + TensorType vec_sum_rows = create_tensor<TensorType>(TensorShape(M, 1, lhs_shape[2]), DataType::S32, 1); + TensorType vec_sum_cols = create_tensor<TensorType>(TensorShape(N, 1, rhs_shape[2]), DataType::S32, 1); + + GEMMKernelInfo gemm_info; + gemm_info.m = M; + gemm_info.n = N; + gemm_info.k = K; + gemm_info.lhs_info = lhs_info; + gemm_info.rhs_info = rhs_info; + gemm_info.output_stage = output_stage; + gemm_info.a_offset = a_offset; + gemm_info.b_offset = b_offset; + // The output tensor will be auto-initialized within the function + + // Create and configure function + ReshapeRHSOperatorType reshape_rhs; + GEMMFunctionType gemm; + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + + // Configure and run the function only if validation succeeds: validate() checks + // whether the target supports the required extension. If it does not, the test + // is skipped; running it regardless would fail, since the target output would + // not match the reference.
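The output stage assembled in the setup above relies on quantization::calculate_quantized_multiplier() to decompose the real scale (0.001f here) into the integer gemmlowp_multipliers[0] / gemmlowp_shifts[0] pair. Below is a minimal sketch of how such a pair is typically applied to an int32 accumulator, assuming the usual Q0.31 convention and a non-negative shift. The helper is illustrative rather than ACL API; real kernels additionally add gemmlowp_offset and clamp to the configured min/max bounds.

    #include <cstdint>

    // Illustrative only: scale ~= multiplier * 2^-(31 + shift), multiplier in Q0.31.
    int32_t apply_fixedpoint_scale(int32_t acc, int32_t multiplier, int32_t shift)
    {
        const int64_t prod  = static_cast<int64_t>(acc) * multiplier; // widen to avoid overflow
        const int64_t round = int64_t{1} << (30 + shift);             // bias for round-to-nearest
        return static_cast<int32_t>((prod + round) >> (31 + shift));
    }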
+ gemm_validated = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, vec_sum_cols.info(), vec_sum_rows.info(), bias.info())); + if(gemm_validated == true) + { + gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, vec_sum_cols.info(), vec_sum_rows.info(), bias.info()); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + + // Allocate tensors + lhs.allocator()->allocate(); + rhs.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); + vec_sum_cols.allocator()->allocate(); + vec_sum_rows.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!vec_sum_cols.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!vec_sum_rows.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(lhs), 0); + fill(AccessorType(rhs), 1); + fill(AccessorType(bias), 2); + + TensorType lhs_32 = create_tensor<TensorType>(lhs_shape, DataType::S32, 1); + TensorType rhs_32 = create_tensor<TensorType>(rhs_shape, DataType::S32, 1); + CastOperation cast_lhs; + CastOperation cast_rhs; + cast_lhs.configure(&lhs, &lhs_32, ConvertPolicy::SATURATE); + cast_rhs.configure(&rhs, &rhs_32, ConvertPolicy::SATURATE); + lhs_32.allocator()->allocate(); + rhs_32.allocator()->allocate(); + cast_lhs.run(); + cast_rhs.run(); + + ReduceOperation lhs_sum_rows; + ReduceOperation rhs_sum_cols; + + lhs_sum_rows.configure(&lhs_32, &vec_sum_rows, 0, ReductionOperation::SUM, false); + rhs_sum_cols.configure(&rhs_32, &vec_sum_cols, 1, ReductionOperation::SUM); + + lhs_sum_rows.run(); + rhs_sum_cols.run(); + + // Compute GEMM + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_SRC_2, &bias }, { ACL_DST, &dst }, { ACL_VEC_COL_SUM, &vec_sum_cols }, { ACL_VEC_ROW_SUM, &vec_sum_rows } }); + gemm.run(gemm_pack); + } + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, GEMMLowpOutputStageInfo output_stage, + const int a_offset, const int b_offset) + { + TensorShape dst_shape = lhs_shape; + dst_shape[0] = rhs_shape[0]; + dst_shape[1] = lhs_shape[1]; + + // Create reference + SimpleTensor<T> lhs{ lhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, a_offset) }; + SimpleTensor<T> rhs{ rhs_shape, data_type, 1, QuantizationInfo(1.0f / 255, b_offset) }; + SimpleTensor<int32_t> bias{ bias_shape, DataType::S32, 1 }; + SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 }; + SimpleTensor<T> dst_final{ dst_shape, data_type, 1 }; + + // Fill reference + fill(lhs, 0); + fill(rhs, 1); + fill(bias, 2); + + dst = reference::gemmlowp_matrix_multiply_core<int32_t, T>(lhs, rhs, dst_shape, a_offset, b_offset); + dst_final = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, T>(dst, bias, + output_stage.gemmlowp_multipliers, output_stage.gemmlowp_shifts, output_stage.gemmlowp_offset, output_stage.gemmlowp_min_bound, output_stage.gemmlowp_max_bound); + return dst_final; + } + + bool gemm_validated = true; + TensorType _target{}; 
+ SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType> +class GEMMLowpMatrixMultiplyReshapedOnlyRHSMMULValidationFixture : public framework::Fixture +{ +public: + void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, + unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type) + { + GEMMLHSMatrixInfo lhs_info; + lhs_info.m0 = m0; + lhs_info.k0 = k0; + + GEMMRHSMatrixInfo rhs_info; + rhs_info.n0 = n0; + rhs_info.k0 = k0; + rhs_info.h0 = h0; + rhs_info.interleave = interleave_rhs; + rhs_info.transpose = transpose_rhs; + + // Set the tensor shapes for LHS and RHS matrices + const TensorShape lhs_shape(k, m, batch_size); + const TensorShape rhs_shape(n, k, batch_size); + + _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type); + if(gemm_validated == true) + { + _reference = compute_reference(lhs_shape, rhs_shape, data_type); + } + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::QASYMM8: + { + // Between 1 and 254 in order to avoid having -128 and 128 for the DOT product path + std::uniform_int_distribution<> distribution(1, 254); + library->fill(tensor, distribution, i); + } + break; + case DataType::QASYMM8_SIGNED: + { + std::uniform_int_distribution<> distribution(-127, 126); + library->fill(tensor, distribution, i); + } + break; + default: + ARM_COMPUTE_ERROR("Unsupported data type"); + } + } + + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, DataType data_type) + { + // Create tensors + TensorType lhs = create_tensor<TensorType>(lhs_shape, data_type, 1); + TensorType rhs = create_tensor<TensorType>(rhs_shape, data_type, 1); + TensorType rhs_reshaped; + TensorType dst; + + const unsigned int M = lhs_shape[1]; + const unsigned int N = rhs_shape[0]; + const unsigned int K = lhs_shape[0]; + + GEMMKernelInfo gemm_info; + gemm_info.m = M; + gemm_info.n = N; + gemm_info.k = K; + gemm_info.lhs_info = lhs_info; + gemm_info.rhs_info = rhs_info; + // The output tensor will be auto-initialized within the function + + // Create and configure function + ReshapeRHSOperatorType reshape_rhs; + GEMMFunctionType gemm; + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + + // Configure and run the function only if validation succeeds: validate() checks + // whether the target supports the required extension. If it does not, the test + // is skipped; running it regardless would fail, since the target output would + // not match the reference.
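One pattern recurs in every hunk of this file: the fixtures move from tensor-bound functions (configure(&src, &dst) followed by a parameterless run()) to operators that are configured purely on ITensorInfo descriptors and receive the concrete tensors only at execution time, through an ITensorPack. Condensed to its essentials, with the same types and pack keys the diff uses:

    // Configure against metadata only; the operator captures no tensor memory.
    ReshapeRHSOperatorType reshape_rhs;
    reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);

    // ... allocate rhs and rhs_reshaped ...

    // Bind the concrete tensors at run time through a pack.
    ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
    reshape_rhs.run(reshape_rhs_pack);

Since the operator holds no tensor pointers, a single configured instance can in principle be reused with different tensors of matching shape.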
+ gemm_validated = bool(gemm.validate(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, nullptr, nullptr, nullptr)); + if(gemm_validated == true) + { + gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info, nullptr, nullptr, nullptr); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + + // Allocate tensors + lhs.allocator()->allocate(); + rhs.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(lhs), 0); + fill(AccessorType(rhs), 1); + + // Compute GEMM + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); + } + + return dst; + } + + SimpleTensor<int32_t> compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type) + { + TensorShape dst_shape = lhs_shape; + dst_shape[0] = rhs_shape[0]; + dst_shape[1] = lhs_shape[1]; + + if(data_type == DataType::QASYMM8) + { + // Create reference + SimpleTensor<uint8_t> lhs{ lhs_shape, data_type, 1 }; + SimpleTensor<uint8_t> rhs{ rhs_shape, data_type, 1 }; + SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 }; + + // Fill reference + fill(lhs, 0); + fill(rhs, 1); + + return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(lhs, rhs, dst_shape, 0, 0); + } + else + { + // Create reference + SimpleTensor<int8_t> lhs{ lhs_shape, data_type, 1 }; + SimpleTensor<int8_t> rhs{ rhs_shape, data_type, 1 }; + SimpleTensor<int32_t> dst{ dst_shape, DataType::S32, 1 }; + + // Fill reference + fill(lhs, 0); + fill(rhs, 1); + + return reference::gemmlowp_matrix_multiply_core<int32_t, int8_t>(lhs, rhs, dst_shape, 0, 0); + } + } + + bool gemm_validated = true; + TensorType _target{}; + SimpleTensor<int32_t> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave_rhs, bool transpose_rhs, DataType data_type) { @@ -1382,13 +2008,15 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - ReshapeRHSFunctionType reshape_rhs; + ReshapeRHSOperatorType reshape_rhs; GEMMFunctionType gemm; - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info); + reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info); + gemm.configure(lhs.info(), rhs_reshaped.info(), dst.info(), gemm_info); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst }); // Allocate tensors lhs.allocator()->allocate(); @@ -1396,18 +2024,20 @@ protected: 
rhs_reshaped.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs_reshaped.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - reshape_rhs.run(); - gemm.run(); + ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } }; + reshape_rhs.run(reshape_rhs_pack); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs_reshaped }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1454,7 +2084,6 @@ template <typename TensorType, typename AccessorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyNativeValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0) { GEMMLHSMatrixInfo lhs_info; @@ -1497,26 +2126,29 @@ protected: // Create and configure function GEMMFunctionType gemm; - gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); + gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); + + add_padding_x({ &lhs, &rhs, &dst }); // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1546,7 +2178,6 @@ template <typename TensorType, typename AccessorType, typename GEMMFunctionType> class GEMMLowpMatrixMultiplyNative3DValidationFixture : public framework::Fixture { public: - template <typename...> void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0) { GEMMLHSMatrixInfo lhs_info; @@ -1592,26 +2223,29 @@ protected: // Create and configure function GEMMFunctionType gemm; - gemm.configure(&lhs, &rhs, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h)); + gemm.configure(lhs.info(), rhs.info(), dst.info(), lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h)); + + ARM_COMPUTE_ASSERT(lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rhs.info()->is_resizable()); - ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); 
- ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + add_padding_x({ &lhs, &rhs, &dst }); // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); // Compute GEMM - gemm.run(); + ITensorPack gemm_pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } }); + gemm.run(gemm_pack); return dst; } @@ -1641,4 +2275,4 @@ protected: } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h index d0855093a7..d88029f93e 100644 --- a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h +++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,11 +46,10 @@ namespace validation { using namespace arm_compute::misc::shape_calculator; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool reinterpret_input_as_3d = false> +template <typename TensorType, typename AccessorType, typename OperatorType, typename T, bool reinterpret_input_as_3d = false> class GEMMReshapeLHSMatrixValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int m0, unsigned int k0, unsigned int v0, bool interleave, bool transpose) { GEMMLHSMatrixInfo lhs_info; @@ -86,23 +85,26 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - FunctionType gemm_lhs_reshape; - gemm_lhs_reshape.configure(&src, &dst, lhs_info, reinterpret_input_as_3d); + OperatorType gemm_lhs_reshape; + gemm_lhs_reshape.configure(src.info(), dst.info(), lhs_info, reinterpret_input_as_3d); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + + add_padding_x({ &src, &dst }); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); // Compute GEMM LHS matrix reshape function - gemm_lhs_reshape.run(); + ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } }; + gemm_lhs_reshape.run(tensors); return dst; } diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h index 99bfa3bced..0929faf04a 100644 --- a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h +++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 
Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,11 +46,10 @@ namespace validation { using namespace arm_compute::misc::shape_calculator; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename OperatorType, typename T> class GEMMReshapeRHSMatrixValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave, bool transpose) { GEMMRHSMatrixInfo rhs_info; @@ -85,23 +84,26 @@ protected: // The output tensor will be auto-initialized within the function // Create and configure function - FunctionType gemm_rhs_reshape; - gemm_rhs_reshape.configure(&src, &dst, rhs_info); + OperatorType gemm_rhs_reshape; + gemm_rhs_reshape.configure(src.info(), dst.info(), rhs_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + + add_padding_x({ &src, &dst }); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); // Compute GEMM RHS matrix reshape function - gemm_rhs_reshape.run(); + ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } }; + gemm_rhs_reshape.run(tensors); return dst; } diff --git a/tests/validation/fixtures/GEMMTranspose1xWFixture.h b/tests/validation/fixtures/GEMMTranspose1xWFixture.h index 2d2e70697a..3765515b57 100644 --- a/tests/validation/fixtures/GEMMTranspose1xWFixture.h +++ b/tests/validation/fixtures/GEMMTranspose1xWFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class GEMMTranspose1xWValidationFixture : public framework::Fixture { public: - template <typename...> void setup(size_t x, size_t y, DataType data_type) { _data_type = data_type; @@ -89,24 +88,25 @@ protected: // Create and configure function FunctionType f; - f.configure(&a, &b); + f.configure(a.info(), b.info()); - ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); // Allocate tensors a.allocator()->allocate(); b.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); // Fill tensors fill(AccessorType(a), 0); fill(AccessorType(b), 1); // Compute GEMM function - f.run(); + ITensorPack tensors{ { ACL_SRC, &a }, { ACL_DST, &b } }; + f.run(tensors); return b; } diff --git a/tests/validation/fixtures/GatherFixture.h b/tests/validation/fixtures/GatherFixture.h index 0a9f8c1d15..857b0387b7 100644 --- a/tests/validation/fixtures/GatherFixture.h +++ b/tests/validation/fixtures/GatherFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class GatherFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape indices_shape, int axis, DataType data_type) { _target = compute_target(input_shape, data_type, axis, indices_shape); @@ -67,9 +66,11 @@ protected: std::mt19937 gen(library->seed()); uint32_t *indices_ptr = static_cast<uint32_t *>(indices.data()); - std::uniform_int_distribution<uint32_t> dist_index(0, input_shape[actual_axis] - 1); - //Let's consider 1D indices - for(unsigned int ind = 0; ind < indices_shape[0]; ind++) + // 10% of the time the index is out-of-range. 
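To see why the index generator in the hunk that follows produces roughly 10% out-of-range values: with N = input_shape[actual_axis], max_index = N + N/9 + 1, so indices are drawn uniformly from [0, N + N/9]. The N/9 + 1 values greater than or equal to N are invalid, a fraction of (N/9 + 1) / (N + N/9 + 1), which tends to 1/10 as N grows. For example, N = 90 gives max_index = 101, draws over [0, 100], and 11 invalid values out of 101, i.e. about 10.9%.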
+ uint32_t max_index = input_shape[actual_axis] + input_shape[actual_axis] / 9 + 1; + std::uniform_int_distribution<uint32_t> dist_index(0, max_index - 1); + + for(unsigned int ind = 0; ind < indices_shape.total_size(); ind++) { indices_ptr[ind] = dist_index(gen); } @@ -91,18 +92,18 @@ protected: FunctionType gather; gather.configure(&src, &indices_tensor, &dst, axis); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(indices_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); indices_tensor.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!indices_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h index b6cf18bd4c..5c7978f4ab 100644 --- a/tests/validation/fixtures/Im2ColFixture.h +++ b/tests/validation/fixtures/Im2ColFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,10 +45,9 @@ namespace validation using namespace arm_compute::misc::shape_calculator; template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool batch_size_on_z> -class Im2ColValidationFixture : public framework::Fixture +class Im2ColOpValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, DataType data_type, const Size2D &kernel_dims, const PadStrideInfo &conv_info, const QuantizationInfo &quant_info, const DataLayout &data_layout, unsigned int num_groups) { @@ -88,23 +87,28 @@ protected: // Create and configure function FunctionType im2col_func; - im2col_func.configure(&src, &dst, _kernel_dims, _conv_info, _has_bias, Size2D(1U, 1U), _num_groups); + im2col_func.configure(src.info(), dst.info(), _kernel_dims, _conv_info, _has_bias, Size2D(1U, 1U), _num_groups); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); + arm_compute::ITensorPack pack = + { + { arm_compute::TensorType::ACL_SRC, &src }, + { arm_compute::TensorType::ACL_DST, &dst } + }; // Compute function - im2col_func.run(); + im2col_func.run(pack); return dst; } diff --git a/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h 
b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h new file mode 100644 index 0000000000..7374093f51 --- /dev/null +++ b/tests/validation/fixtures/IndirectConv2dAddressPrecalculationFixture.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE +#define ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/Globals.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/IndirectConv2dAddressPrecalculation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +using namespace arm_compute::misc::shape_calculator; + +template <typename TensorType, typename AccessorType, typename OperatorType> +class IndirectConv2dAddressPrecalculationValidationFixture : public framework::Fixture +{ +public: + void setup(unsigned int src_w, + unsigned int src_h, + unsigned int src_b, + unsigned int wei_w, + unsigned int wei_h, + unsigned int pad, + unsigned int stride, + unsigned int m0) + { + DirectConvComputeKernelInfo desc; + desc.m0 = m0; + desc.n0 = 1; // Not used by the kernel + desc.k0 = 1; // Not used by the kernel + desc.export_weights_to_cl_image = false; // Not used by the kernel + + const PadStrideInfo conv_info(stride, stride, pad, pad); + + const TensorShape shape_conv_src(23, // The input channels are not used by the kernel + src_w, + src_h, + src_b); + + const TensorShape shape_conv_wei(23, // The input channels are not used by the kernel + wei_w, + wei_h, + 23 // The output channels are not used by the kernel + ); + + // The result of the kernel does not change with the datatype. 
Hence, we can fix it to F16 for validation purposes + const DataType data_type = DataType::F16; + + _target = compute_target(shape_conv_src, shape_conv_wei, data_type, conv_info, desc); + _reference = compute_reference(shape_conv_src, shape_conv_wei, data_type, conv_info, desc); + } + +protected: + TensorType compute_target(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc) + { + TensorInfo src_conv_info(shape_conv_src, 1, data_type, DataLayout::NHWC); + TensorInfo wei_conv_info(shape_conv_wei, 1, data_type, DataLayout::NHWC); + TensorType dst; + + // The output tensor will be auto-initialized within the function + + // Create and configure function + OperatorType func; + func.configure(&src_conv_info, &wei_conv_info, dst.info(), conv_info, desc); + + add_padding_x({ &dst }); + + // Allocate tensors + dst.allocator()->allocate(); + + // Compute the indirect addressing buffer + ITensorPack tensors = { { ACL_DST, &dst } }; + func.run(tensors); + + return dst; + } + + SimpleTensor<int32_t> compute_reference(TensorShape shape_conv_src, TensorShape shape_conv_wei, DataType data_type, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc) + { + ARM_COMPUTE_UNUSED(data_type); + TensorShape shape_out = compute_indirect_buffer_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info, desc); + TensorShape output_conv_shape = compute_deep_convolution_shape(shape_conv_src, DataLayout::NHWC, shape_conv_wei, conv_info); + + return reference::indirect_conv2d_addr_precalculation(shape_conv_src, shape_conv_wei, output_conv_shape, shape_out, conv_info); + } + + TensorType _target{}; + SimpleTensor<int32_t> _reference{}; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_INDIRECT_CONV2D_ADDRESS_PRECALCULATION_FIXTURE */
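For context on the fixture above: an indirect convolution precomputes, for each output point and kernel tap, which input element the kernel should gather, so the inner loop can follow a table of addresses instead of recomputing coordinates and bounds checks on the fly. The sketch below shows one generic form such a table entry can take; it is illustrative only and not necessarily ACL's exact buffer layout, which compute_indirect_buffer_shape() defines.

    #include <cstdint>

    // Hypothetical helper: input row gathered for output row out_y and kernel tap ky.
    // Out-of-bounds taps are flagged (here with -1) so the kernel can substitute padding.
    int32_t indirect_src_y(int32_t out_y, int32_t ky, int32_t stride, int32_t pad, int32_t src_h)
    {
        const int32_t in_y = out_y * stride - pad + ky;
        return (in_y >= 0 && in_y < src_h) ? in_y : -1;
    }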
\ No newline at end of file diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h index 611d9aae5b..c26dd99f02 100644 --- a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h +++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class InstanceNormalizationLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place) { _target = compute_target(shape, data_type, data_layout, in_place); @@ -86,10 +85,10 @@ protected: FunctionType instance_norm_func; instance_norm_func.configure(&src, in_place ? nullptr : &dst, gamma, beta, epsilon); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); if(!in_place) { - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); } // Allocate tensors @@ -99,10 +98,10 @@ protected: dst.allocator()->allocate(); } - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); if(!in_place) { - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); } // Fill tensors diff --git a/tests/validation/fixtures/L2NormalizeLayerFixture.h b/tests/validation/fixtures/L2NormalizeLayerFixture.h index 349c0904eb..b8f4b1eaf3 100644 --- a/tests/validation/fixtures/L2NormalizeLayerFixture.h +++ b/tests/validation/fixtures/L2NormalizeLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -48,7 +48,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class L2NormalizeLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, DataLayout data_layout, int axis, float epsilon) { _target = compute_target(shape, data_type, data_layout, axis, epsilon); @@ -81,15 +80,15 @@ protected: FunctionType l2_norm_func; l2_norm_func.configure(&src, &dst, axis, epsilon); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/LSTMLayerFixture.h b/tests/validation/fixtures/LSTMLayerFixture.h index 366d050039..a32e9adfe5 100644 --- a/tests/validation/fixtures/LSTMLayerFixture.h +++ b/tests/validation/fixtures/LSTMLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class LSTMLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape input_weights_shape, TensorShape recurrent_weights_shape, TensorShape cell_bias_shape, TensorShape output_cell_shape, TensorShape output_shape, TensorShape scratch_shape, ActivationLayerInfo info, float cell_threshold, float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt, bool use_layer_norm) @@ -167,22 +166,22 @@ protected: &scratch, &output_state_out, &cell_state_out, &output, lstm_params, info, cell_threshold, projection_threshold); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output_state_in.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_state_in.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(scratch.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output_state_out.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_state_out.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(input_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(input_to_cell_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(input_to_output_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(recurrent_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(recurrent_to_cell_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(recurrent_to_output_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(forget_gate_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output_gate_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output_state_in.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_state_in.info()->is_resizable()); + ARM_COMPUTE_ASSERT(scratch.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output_state_out.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_state_out.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); @@ -202,22 +201,22 @@ protected: cell_state_out.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - 
ARM_COMPUTE_EXPECT(!input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output_state_in.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_state_in.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!scratch.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output_state_out.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_state_out.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!input_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!input_to_cell_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!input_to_output_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!recurrent_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!recurrent_to_cell_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!recurrent_to_output_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!forget_gate_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output_gate_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output_state_in.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_state_in.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!scratch.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output_state_out.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_state_out.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); @@ -236,18 +235,18 @@ protected: if(!cifg_opt) { - ARM_COMPUTE_EXPECT(input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(recurrent_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(input_gate_bias.info()->is_resizable()); input_to_input_w.allocator()->allocate(); recurrent_to_input_w.allocator()->allocate(); cell_to_input_w.allocator()->allocate(); input_gate_bias.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS); + 
ARM_COMPUTE_ASSERT(!input_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!recurrent_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_to_input_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!input_gate_bias.info()->is_resizable()); fill(AccessorType(input_to_input_w), 13); fill(AccessorType(recurrent_to_input_w), 14); if(peephole_opt) @@ -260,26 +259,26 @@ protected: if(peephole_opt) { - ARM_COMPUTE_EXPECT(cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(cell_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_to_output_w.info()->is_resizable()); cell_to_forget_w.allocator()->allocate(); cell_to_output_w.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!cell_to_forget_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_to_output_w.info()->is_resizable()); fill(AccessorType(cell_to_forget_w), 18); fill(AccessorType(cell_to_output_w), 19); } if(projection_opt) { - ARM_COMPUTE_EXPECT(projection_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(projection_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(projection_bias.info()->is_resizable()); projection_w.allocator()->allocate(); projection_bias.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!projection_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!projection_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!projection_bias.info()->is_resizable()); fill(AccessorType(projection_w), 20); fill(AccessorType(projection_bias), 21); @@ -289,25 +288,25 @@ protected: { if(!cifg_opt) { - ARM_COMPUTE_EXPECT(input_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input_layer_norm_w.info()->is_resizable()); input_layer_norm_w.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input_layer_norm_w.info()->is_resizable()); fill(AccessorType(input_layer_norm_w), 22); } - ARM_COMPUTE_EXPECT(forget_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(cell_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(forget_layer_norm_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(cell_layer_norm_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output_layer_norm_w.info()->is_resizable()); forget_layer_norm_w.allocator()->allocate(); cell_layer_norm_w.allocator()->allocate(); output_layer_norm_w.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!forget_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!cell_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output_layer_norm_w.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!forget_layer_norm_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!cell_layer_norm_w.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output_layer_norm_w.info()->is_resizable()); 
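A note on the ARM_COMPUTE_EXPECT to ARM_COMPUTE_ASSERT migration that dominates these hunks: as the macro names suggest, the EXPECT form records the failure at the given log level and lets the test keep running, whereas the ASSERT form stops the test case immediately. For preconditions such as these resizability checks, continuing after a failure would only produce follow-on errors, hence the switch:

    ARM_COMPUTE_EXPECT(cond, framework::LogLevel::ERRORS); // record the failure, keep running
    ARM_COMPUTE_ASSERT(cond);                              // abort the test case on failure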
fill(AccessorType(forget_layer_norm_w), 23); fill(AccessorType(cell_layer_norm_w), 24); @@ -458,7 +457,6 @@ protected: } input_gate = reference::activation_layer(input_gate, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); } - // Compute cell_state SimpleTensor<T> fully_connected_cell_state = reference::fully_connected_layer(input, input_to_cell_w, cell_bias, output_cell_shape); transposed_weights = reference::transpose(recurrent_to_cell_w); @@ -474,12 +472,13 @@ protected: fill(cell_bias, 8); cell_state_out = reference::arithmetic_operation(reference::ArithmeticOperation::ADD, cell_state_out, cell_bias, data_type, ConvertPolicy::SATURATE); } - cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); + cell_state_out = reference::activation_layer(cell_state_out, info); cell_state_out = reference::pixel_wise_multiplication<T, T, T>(cell_state_out, input_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN, data_type); cell_state_out = reference::arithmetic_operation(reference::ArithmeticOperation::ADD, cell_state_out, pixelwise_mul, data_type, ConvertPolicy::SATURATE); + if(cell_threshold != 0.f) { - cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold, cell_threshold)); + cell_state_out = reference::activation_layer(cell_state_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, cell_threshold, -cell_threshold)); } // Compute output @@ -515,7 +514,6 @@ protected: output_state_out = reference::activation_layer(fully_connected_projection, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold)); } } - std::vector<SimpleTensor<T>> scratch_inputs; if(!cifg_opt) { diff --git a/tests/validation/fixtures/LogicalFixture.h b/tests/validation/fixtures/LogicalFixture.h index 9f64d89d10..60dc963ba7 100644 --- a/tests/validation/fixtures/LogicalFixture.h +++ b/tests/validation/fixtures/LogicalFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2020-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -60,9 +60,9 @@ protected: { for(auto t : tensors) { - ARM_COMPUTE_EXPECT(t->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(t->info()->is_resizable()); t->allocator()->allocate(); - ARM_COMPUTE_EXPECT(!t->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!t->info()->is_resizable()); } } @@ -79,7 +79,6 @@ class LogicalBinaryOperationValidationFixture : public LogicalOperationValidatio using Parent = LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>; public: - template <typename...> void setup(TensorShape shape0, TensorShape shape1) { Parent::_target = compute_target(shape0, shape1); @@ -135,7 +134,6 @@ class LogicalNotValidationFixture : public LogicalOperationValidationFixtureBase using Parent = LogicalOperationValidationFixtureBase<TensorType, AccessorType, FunctionType, T>; public: - template <typename...> void setup(TensorShape shape, DataType data_type) { Parent::_target = compute_target(shape, data_type); diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h new file mode 100644 index 0000000000..ffd12e56d0 --- /dev/null +++ b/tests/validation/fixtures/MatMulFixture.h @@ -0,0 +1,612 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H + +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + +#include "src/core/utils/quantization/AsymmHelpers.h" +#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/ActivationLayer.h" +#include "tests/validation/reference/GEMM.h" +#include "tests/validation/reference/GEMMLowp.h" +#include "tests/validation/reference/Permute.h" +#include "tests/validation/reference/ReshapeLayer.h" +#include "tests/validation/Validation.h" + +#include <limits> +#include <random> +#include <type_traits> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulGenericValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + Settings settings, + QuantizationInfo a_qinfo = QuantizationInfo(), + QuantizationInfo b_qinfo = QuantizationInfo(), + QuantizationInfo o_qinfo = QuantizationInfo()) + { + // For brevity, the input shapes are assumed to be not-transposed for both a and b matrices. 
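// A minimal sketch of the shape convention described above (an illustration,
// assuming ACL's TensorShape(x, y) ordering of (columns, rows)): the data-set
// shapes always describe the non-transposed A (M x K) and B (K x N), so a set
// transpose flag permutes the shape before the tensor is created, and the
// operator is then told to treat that operand as adjoint via MatMulInfo:
//
//   TensorShape shape_a(8U, 4U);                     // A is M x K = 4 x 8
//   if (transpose_a)
//   {
//       permute(shape_a, PermutationVector(1U, 0U)); // stored as 8 x 4, i.e. A transposed
//   }
//   // ... later: mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);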
+ if (transpose_a) + { + permute(shape_a, PermutationVector(1U, 0U)); + } + if (transpose_b) + { + permute(shape_b, PermutationVector(1U, 0U)); + } + + _target = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, + num_extra_runs, settings, a_qinfo, b_qinfo, o_qinfo); + _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, + a_qinfo, b_qinfo, o_qinfo); + } + +protected: + template <typename U> + void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f) + { + switch (tensor.data_type()) + { + case DataType::BFLOAT16: + { + arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{float(lo), float(hi)}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{float(lo), float(hi)}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(lo, hi); + library->fill(tensor, distribution, i); + break; + } + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + { + library->fill_tensor_uniform(tensor, i); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported data type."); + } + } + } + + virtual TensorType compute_target(const TensorShape &shape_a, + const TensorShape &shape_b, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + const Settings &settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + // 1. Create Classes and configure function + // ---------------------------------------------------- + // Create tensors + // Configure relevant classes and matmul function + TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo); + TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo); + + FunctionType matmul; + + // Configure MatMulInfo class + MatMulInfo mm_info; + mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b); + + // Ensure values are dynamic + a.info()->set_are_values_constant(false); + b.info()->set_are_values_constant(false); + + // Configure operator + matmul.configure(&a, &b, &dst, mm_info, settings, act_info); + + // Assertions + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + a.allocator()->allocate(); + b.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // For multiple runs. + for (int i = 0; i < num_extra_runs; i++) + { + // Stress dynamic tensors by running multiple times. + // -------------------------------------------------------- + // Fill tensors with new seed + // Run function + const int seed_offset = num_extra_runs * 100; + fill(AccessorType(a), seed_offset); + fill(AccessorType(b), seed_offset + 1); + + matmul.run(); + } + + // 2. 
Final Run for reference comparison + // -------------------------------------------------------- + // Re-fill tensors same seed as reference run + // Compute MatMul operation + fill(AccessorType(a), 2); + fill(AccessorType(b), 3); + + matmul.run(); + + return dst; + } + + template <typename TT> + typename std::enable_if < !std::is_integral<TT>::value, SimpleTensor<TT >>::type + compute_reference_gemm(const SimpleTensor<TT> &a, + const SimpleTensor<TT> &b, + const SimpleTensor<TT> &c, + float alpha, + float beta, + const QuantizationInfo &o_qinfo) + { + ARM_COMPUTE_UNUSED(o_qinfo); + + return reference::gemm(a, b, c, alpha, beta); + } + + template <typename TT> + typename std::enable_if<std::is_integral<TT>::value, SimpleTensor<TT>>::type + compute_reference_gemm(const SimpleTensor<TT> &a, + const SimpleTensor<TT> &b, + const SimpleTensor<TT> &c, + float alpha, + float beta, + const QuantizationInfo &o_qinfo) + { + ARM_COMPUTE_UNUSED(alpha, beta); + + const auto aq = a.quantization_info().uniform(); + const auto bq = b.quantization_info().uniform(); + const auto oq = o_qinfo.uniform(); + + const auto multiplier = aq.scale * bq.scale / oq.scale; + + int32_t output_multiplier = 0; + int32_t output_shift = 0; + quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift); + std::vector<int32_t> output_multipliers{output_multiplier}; + std::vector<int32_t> output_shifts{output_shift}; + + //The lhs and rhs offsets are negated here to keep the reference aligned with the function implementation where the lhs and rhs offsets are also negated. + const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(a, b, c.shape(), -aq.offset, -bq.offset); + + auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TT>( + tmp, output_multipliers, output_shifts, oq.offset, std::numeric_limits<int32_t>::lowest(), + std::numeric_limits<int32_t>::max()); + output.quantization_info(o_qinfo); + + return output; + } + + SimpleTensor<T> compute_reference(const TensorShape &a_shape, + const TensorShape &b_shape, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + // We collapse dimensions > 2 onto dimension 2, i.e. 4D+ tensors will look like 3D + // This is necessary unless we choose to extend gemm reference for 4D+ tensors + TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); + TensorShape a_shape_collapsed = a_shape.collapsed_from(Window::DimZ); + TensorShape b_shape_collapsed = b_shape.collapsed_from(Window::DimZ); + + // Create reference + SimpleTensor<T> a{a_shape_collapsed, data_type, 1, a_qinfo}; + SimpleTensor<T> b{b_shape_collapsed, data_type, 1, b_qinfo}; + SimpleTensor<T> c{output_shape_collapsed, data_type, 1}; + + // Fill reference + fill(a, 2); + fill(b, 3); + + /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if transpose_a is set to true, then A is assumed to be (B x K x M), + therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K) + in order to be able to call reference implementation that works with (B x M x K) input. + Similarly, if transpose_b is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. 
*/ + + // Define transposed shapes + TensorShape a_transposed_shape(a.shape()); + a_transposed_shape.set(0, a.shape().y()); + a_transposed_shape.set(1, a.shape().x()); + + TensorShape b_transposed_shape(b.shape()); + b_transposed_shape.set(0, b.shape().y()); + b_transposed_shape.set(1, b.shape().x()); + + // Define transposed tensors + SimpleTensor<T> a_transposed{a_transposed_shape, data_type}; + SimpleTensor<T> b_transposed{b_transposed_shape, data_type}; + + // pretranspose a if necessary + if (transpose_a) + { + a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U)); + } + // pretranspose b if necessary + if (transpose_b) + { + b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U)); + } + + // Setting beta to 0 will effectively disable C for the + // computation of the reference: alpha * A * B + 0 * C + // Use transposed tensors if boolean enabled else use original tensors + auto result = compute_reference_gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, + 1.0f, 0.f, o_qinfo); + + result = reference::activation_layer<T>(result, act_info, o_qinfo); + + // We reshape the gemm output back if the tensor is high dimensional + if (output_shape_collapsed != output_shape) + { + result = reference::reshape_layer(result, output_shape); + } + + return result; + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +/// TODO: (ONCPUML-1451) The current state of this fixture is interim and a longer-term testing method will be implemented later. +/// @note: Currently we support only a 2x2 test due to the lack of reorder ref. implementation. +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulFixedFormatFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + TensorType compute_target(const TensorShape &shape_a, + const TensorShape &shape_b, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + const Settings &settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) override + { + // 1. 
Create Classes and configure function + // ---------------------------------------------------- + // Create tensors + // Configure relevant classes and matmul function + TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo); + TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo); + TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo); + + const auto weight_tensor_info = TensorInfo(*b.info()); + const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info); + TensorType weights_transformed = create_tensor<TensorType>(new_tensor_info); + + // Configure MatMulInfo class + MatMulInfo mm_info; + mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b); + + // Ensure values are dynamic + a.info()->set_are_values_constant(false); + b.info()->set_are_values_constant(false); + weights_transformed.info()->set_are_values_constant(false); + + FunctionType matmul; + + // Configure operator + matmul.configure(&a, &weights_transformed, &dst, mm_info, settings, act_info); + + // Assertions + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights_transformed.info()->is_resizable()); + + // Allocate tensors + a.allocator()->allocate(); + b.allocator()->allocate(); + dst.allocator()->allocate(); + weights_transformed.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights_transformed.info()->is_resizable()); + + // For multiple runs. + for (int i = 0; i < num_extra_runs; i++) + { + // Stress dynamic tensors by running multiple times. + // -------------------------------------------------------- + // Fill tensors with new seed + // Run function + const int seed_offset = num_extra_runs * 100; + this->fill(AccessorType(a), seed_offset); + this->fill(AccessorType(b), seed_offset + 1); + + matmul.run(); + } + + // 2. 
Final Run for reference comparison + // -------------------------------------------------------- + // Re-fill tensors same seed as reference run + // Compute MatMul operation + this->fill(AccessorType(a), 2); + this->fill(AccessorType(b), 3); + + rearrange_data(AccessorType(b), AccessorType(weights_transformed)); + + matmul.run(); + + return dst; + } + + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + Settings settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + if (CPUInfo::get().has_bf16()) + { + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings, + a_qinfo, b_qinfo, o_qinfo); + } + } + +private: + TensorInfo prepare_weights(const TensorInfo tensor_info) + { + const DataLayout data_layout = tensor_info.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS); + const DataType data_type = tensor_info.data_type(); + const TensorShape tensor_shape = tensor_info.tensor_shape(); + const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS); + + arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes(); + strides_in_bytes.set(1, 32); + strides_in_bytes.set(2, 32); + + const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes(); + const size_t total_size_in_bytes = 32; + + const TensorShape TS(H, W); + + TensorInfo new_tensor_info = tensor_info; + new_tensor_info.init(TS, tensor_info.num_channels(), data_type, strides_in_bytes, offset_first_element_in_bytes, + total_size_in_bytes); + + return new_tensor_info; + } + + void rearrange_data(const AccessorType src, AccessorType dst) + { + const TensorShape src_tensor_shape = src.shape(); + const DataLayout data_layout = src.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS); + const unsigned int O = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O + const unsigned int H = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const unsigned int W = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + const unsigned int I = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I + ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(I == 1 && O == 1, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS); + + const T *src_ptr = reinterpret_cast<const T *>(src.data()); + T *dst_ptr = reinterpret_cast<T *>(dst.data()); + + // rearrange indexes for 2x2 input and weight + int dst_idx[] = {0, 4, 1, 5}; + for (int i = 0; i < 4; i++) + { + dst_ptr[dst_idx[i]] = src_ptr[i]; + } + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulValidationFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, 
FunctionType, Settings, T> +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type) + { + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0, Settings()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulValidationWithDynamicTensorsFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs) + { + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class QuantizedMatMulValidationFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(), + a_qinfo, b_qinfo, o_qinfo); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulValidationWithActivationFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info) + { + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class MatMulValidationWithActivationAlphaBetaFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo::ActivationFunction function, + float alpha_beta) + { + ActivationLayerInfo act_info(function, alpha_beta, alpha_beta); + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T> +class QuantizedMatMulValidationWithActivationFixture + : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T> +{ +public: + 
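// Like the AlphaBeta wrapper above, this quantized variant feeds the single
// alpha_beta value into both activation parameters and forwards the three
// QuantizationInfo arguments unchanged to the generic fixture, which derives
// the reference requantization (multiplier and shift) from them.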
void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo::ActivationFunction function, + float alpha_beta, + int num_extra_runs, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + ActivationLayerInfo act_info(function, alpha_beta, alpha_beta); + MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(), + a_qinfo, b_qinfo, o_qinfo); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H diff --git a/tests/validation/fixtures/MatMulKernelFixture.h b/tests/validation/fixtures/MatMulKernelFixture.h new file mode 100644 index 0000000000..26072dff65 --- /dev/null +++ b/tests/validation/fixtures/MatMulKernelFixture.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H + +#include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/CL/Helper.h" +#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/Validation.h" +#include "tests/validation/reference/GEMM.h" +#include "tests/validation/reference/GEMMLowp.h" +#include "tests/validation/reference/Permute.h" +#include "tests/validation/reference/ReshapeLayer.h" +#include <cmath> +#include <random> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +using namespace arm_compute::opencl::kernels; + +template <typename T, typename KernelType, bool use_mmul = false> +class MatMulKernelGenericValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type, + bool enable_bias) + { + // This hash is used by random generators. 
There may be hash collisions but + // this is intentional as it's a very easy way to make the current + // random generation process differ across many test configurations + // which previously used the same set of values. + _hash = M0 + N0 + K0 + shape_a[0] + shape_a[1] + shape_b[0] + shape_b[1] + enable_bias + export_rhs_to_cl_image; + + // Flag to create a bias + _enable_bias = enable_bias; + + // For brevity, the input shapes are assumed to be non-transposed for both Lhs and Rhs matrices. + QuantizationInfo lhs_q_info; + QuantizationInfo rhs_q_info; + QuantizationInfo dst_q_info; + + if(is_data_type_quantized(data_type)) + { + const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max()); + const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min()); + + std::mt19937 generator(library->seed() + _hash); + std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f); + std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max); + + const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3] + + const int32_t offset_lhs = distribution_t(generator); + const int32_t offset_rhs = distribution_t(generator); + + lhs_q_info = QuantizationInfo(scale_lhs, offset_lhs); + rhs_q_info = QuantizationInfo(scale_rhs, offset_rhs); + + const int m = shape_a.y(); + const int n = shape_b.x(); + const int k = shape_a.x(); + + const float bias_fraction = enable_bias ? 0.5f : 0.f; + + QuantizationHint q_hint = suggest_matmul_dst_q_info_and_bias(lhs_q_info, rhs_q_info, m, n, k, data_type, bias_fraction); + dst_q_info = q_hint.q_info; + _min_bias = q_hint.bias_min; + _max_bias = q_hint.bias_max; + } + + if(pretranspose_a) + { + permute(shape_a, PermutationVector(1U, 0U)); + } + + if(pretranspose_b) + { + permute(shape_b, PermutationVector(1U, 0U)); + } + + // Skip configurations unsupported by the device. + _device_supports_export_to_cl_image = image2d_from_buffer_supported(CLKernelLibrary::get().get_device()); + if(!_device_supports_export_to_cl_image && export_rhs_to_cl_image) + { + ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped"); + framework::ARM_COMPUTE_PRINT_INFO(); + return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs. + } + + _device_supports_mmul = arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()); + if(!_device_supports_mmul && use_mmul) + { + ARM_COMPUTE_TEST_INFO("cl_arm_matrix_multiply not supported. TEST skipped"); + framework::ARM_COMPUTE_PRINT_INFO(); + return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs.
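// A sketch of the matching guard each FIXTURE_DATA_TEST_CASE needs so that
// validate() is skipped too when setup() returned early (hypothetical test
// body; the real cases live under tests/validation, and the same pattern
// applies to the cl_khr_image2d_from_buffer check above):
//
//   if(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()))
//   {
//       return; // nothing was computed, so there is nothing to validate
//   }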
+ } + + _target = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, lhs_q_info, rhs_q_info, dst_q_info); + _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type, lhs_q_info, rhs_q_info, dst_q_info); + } + +protected: + template <typename U> + void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f) + { + switch(tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) }; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(lo, hi); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + + template <typename U> + void fill_bias_s32(U &&tensor, int i, int32_t min, int32_t max) + { + std::uniform_int_distribution<int32_t> distribution(min, max); + library->fill(tensor, distribution, i); + } + + template <typename U, typename D> + void fill_constant(U &&tensor, D value) + { + library->fill_tensor_value(tensor, value); + } + + CLTensor compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, + bool export_rhs_to_cl_image, DataType data_type, const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) + { + CLSynthetizeOperator<KernelType> matMul{}; + MatMulKernelInfo matmul_info; + matmul_info.adj_lhs = pretranspose_a; + matmul_info.adj_rhs = pretranspose_b; + matmul_info.m0 = M0; + matmul_info.n0 = N0; + matmul_info.k0 = K0; + matmul_info.export_rhs_to_cl_image = export_rhs_to_cl_image; + + bool is_quantized = is_data_type_quantized(data_type); + + // Create tensors + CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, lhs_q_info); + CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, rhs_q_info); + CLTensor bias = create_tensor<CLTensor>(output_shape[0], (is_quantized) ? DataType::S32 : data_type, 1, dst_q_info); + CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, dst_q_info); + + matMul.configure(a.info(), b.info(), (_enable_bias) ? 
bias.info() : nullptr, dst.info(), matmul_info); + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + a.allocator()->allocate(); + b.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(CLAccessor(a), _hash + 1); + fill(CLAccessor(b), _hash + 2); + + // Compute matMul kernel + ITensorPack tensors_pack({ { ACL_SRC_0, &a }, + { ACL_SRC_1, &b }, + { ACL_DST, &dst } + }); + + if(_enable_bias) + { + // Allocate, fill and add bias to TensorPack obj + bias.allocator()->allocate(); + if(is_quantized) + { + fill_bias_s32(CLAccessor(bias), _hash + 3, _min_bias, _max_bias); + } + else + { + fill(CLAccessor(bias), _hash + 3); + } + tensors_pack.add_tensor(ACL_SRC_2, &bias); + } + + matMul.run(tensors_pack); + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type, + const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, const QuantizationInfo &dst_q_info) + { + // We collapse dimensions > 2 onto dimension 2, i.e. 4D+ tensors will look like 3D + // This is necessary unless we choose to extend gemm reference for 4D+ tensors + TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); + TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ); + TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ); + + // Create reference + SimpleTensor<T> a{ shape_a_collapsed, data_type, 1, lhs_q_info }; + SimpleTensor<T> b{ shape_b_collapsed, data_type, 1, rhs_q_info }; + SimpleTensor<T> c{ output_shape_collapsed, data_type, 1, dst_q_info }; + + // Fill reference + fill(a, _hash + 1); + fill(b, _hash + 2); + + /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M), + therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K) + in order to be able to call reference implementation that works with (B x M x K) input. + Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */ + + // Define transposed shapes + TensorShape a_transposed_shape(a.shape()); + a_transposed_shape.set(0, a.shape().y()); + a_transposed_shape.set(1, a.shape().x()); + + TensorShape b_transposed_shape(b.shape()); + b_transposed_shape.set(0, b.shape().y()); + b_transposed_shape.set(1, b.shape().x()); + + // Define transposed tensors + SimpleTensor<T> a_transposed{ a_transposed_shape, data_type }; + SimpleTensor<T> b_transposed{ b_transposed_shape, data_type }; + + // pretranspose a if necessary + if(pretranspose_a) + { + a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U)); + } + + // pretranspose b if necessary + if(pretranspose_b) + { + b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U)); + } + + // Use transposed tensors if boolean enabled else use original tensors + SimpleTensor<T> result = gemm_reference<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ?
b_transposed : b, c); + + // We reshape the gemm output back if the tensor is high dimensional + if(output_shape_collapsed != output_shape) + { + result = reference::reshape_layer(result, output_shape); + } + + return result; + } + + template <typename U = T> + typename std::enable_if < std::is_same<U, float>::value || std::is_same<U, half>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c) + { + // Fill bias, then copy first dimension into subsequent dimensions to mimic broadcast + // of bias tensor from shape [dst.dimension(0)] to [dst.tensor_shape()] in target kernel + if(_enable_bias) + { + fill(c, _hash + 3); + const int n = c.shape().x(); + const int other_dims = c.shape().collapsed_from(1)[1]; + for(int i = 1; i < other_dims; ++i) // For all data, copy first n elements into remaining batches + { + memcpy(c.data() + i * n, c.data(), n * sizeof(T)); + } + } + // Setting beta to 0 will effectively disable C for the + // computation of the reference: alpha * A * B + 0 * C + return reference::gemm<U>(a, b, c, 1.0f, (_enable_bias) ? 1.0f : 0.f); + } + + template <typename U = T> + typename std::enable_if < std::is_same<U, int8_t>::value || std::is_same<U, uint8_t>::value, SimpleTensor<U >>::type gemm_reference(SimpleTensor<U> &a, SimpleTensor<U> &b, SimpleTensor<U> &c) + { + const UniformQuantizationInfo aq = a.quantization_info().uniform(); + const UniformQuantizationInfo bq = b.quantization_info().uniform(); + const UniformQuantizationInfo cq = c.quantization_info().uniform(); + + const SimpleTensor<int32_t> result = reference::gemmlowp_matrix_multiply_core<int32_t, U, U>(a, b, c.shape(), -aq.offset, -bq.offset); + + std::vector<int32_t> gemmlowp_multipliers{ 1 }; + std::vector<int32_t> gemmlowp_shifts{ 1 }; + const int gemmlowp_offset = cq.offset; + const float scale = aq.scale * bq.scale / cq.scale; + + quantization::calculate_quantized_multiplier(scale, &gemmlowp_multipliers[0], &gemmlowp_shifts[0]); + constexpr int32_t gemmlowp_min_bound = std::numeric_limits<int32_t>::min(); + constexpr int32_t gemmlowp_max_bound = std::numeric_limits<int32_t>::max(); + + SimpleTensor<int> bias{ c.shape(), DataType::S32 }; + if(_enable_bias) + { + // Identical to float implementation, fill and copy values of bias first dimension + fill_bias_s32(bias, _hash + 3, _min_bias, _max_bias); + const int n = bias.shape().x(); + const int other_dims = bias.shape().collapsed_from(1)[1]; + const unsigned int dt_size = sizeof(int32_t); + for(int i = 1; i < other_dims; ++i) + { + memcpy(bias.data() + i * n, bias.data(), n * dt_size); + } + } + else + { + fill_constant(bias, static_cast<int32_t>(0)); // effectively disable bias + } + + const SimpleTensor<U> final_result = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, U>(result, bias, + gemmlowp_multipliers, gemmlowp_shifts, gemmlowp_offset, gemmlowp_min_bound, gemmlowp_max_bound); + + return final_result; + } + + CLTensor _target{}; + SimpleTensor<T> _reference{}; + bool _enable_bias{ false }; + bool _device_supports_export_to_cl_image{ true }; + bool _device_supports_mmul{ true }; + int32_t _min_bias{ 0 }; + int32_t _max_bias{ 0 }; + int32_t _hash{ 0 }; +}; + +template <typename T, typename KernelType, bool use_mmul = false> +class MatMulKernelValidationFixture : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, 
bool export_rhs_to_cl_image, DataType data_type) + { + MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, + false /* enable bias */); + } +}; + +template <typename T, typename KernelType, bool use_mmul = false> +class MatMulKernelWithBiasValidation : public MatMulKernelGenericValidationFixture<T, KernelType, use_mmul> +{ +public: + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, int M0, int N0, int K0, bool export_rhs_to_cl_image, DataType data_type) + { + MatMulKernelGenericValidationFixture<T, KernelType, use_mmul>::setup(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, export_rhs_to_cl_image, data_type, + true /* enable bias */); + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_MATMULKERNELFIXTURE_H diff --git a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h index 7c118da319..808e3ffabd 100644 --- a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h +++ b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021 Arm Limited. + * Copyright (c) 2020-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename PoolingFunctionTy class MaxUnpoolingLayerValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout) { std::mt19937 gen(library->seed()); @@ -106,9 +105,9 @@ protected: MaxUnpoolingFunctionType unpool_layer; unpool_layer.configure(&dst, &indices, &unpooled, pool_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(indices.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(indices.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -116,10 +115,10 @@ protected: indices.allocator()->allocate(); unpooled.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!indices.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!unpooled.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!indices.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!unpooled.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -150,7 +149,6 @@ template <typename TensorType, typename AccessorType, typename F1, typename F2, class MaxUnpoolingLayerValidationFixture : public MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T> { public: - template <typename...> void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, DataType data_type, DataLayout data_layout) { MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>::setup(shape, 
PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, true), diff --git a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h index 5d11d1f8e2..bf5d20790c 100644 --- a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h +++ b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,30 +44,35 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class MeanStdDevNormalizationLayerValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape shape, DataType dt, bool in_place, float epsilon = 1e-8f) + void setup(TensorShape shape, DataType dt, bool in_place, float epsilon = 1e-8) { - _data_type = dt; - _target = compute_target(shape, dt, in_place, epsilon); - _reference = compute_reference(shape, dt, epsilon); + QuantizationInfo qi = QuantizationInfo(0.5f, 10); + _data_type = dt; + _target = compute_target(shape, dt, in_place, epsilon, qi); + _reference = compute_reference(shape, dt, epsilon, qi); } protected: template <typename U> - void fill(U &&src_tensor) + void fill(U &&tensor) { - static_assert(std::is_floating_point<T>::value || std::is_same<T, half>::value, "Only floating point data types supported."); - using DistributionType = typename std::conditional<std::is_same<T, half>::value, arm_compute::utils::uniform_real_distribution_16bit<T>, std::uniform_real_distribution<T>>::type; - - DistributionType distribution{ T(-1.0f), T(1.0f) }; - library->fill(src_tensor, distribution, 0); + if(is_data_type_float(_data_type)) + { + std::uniform_real_distribution<> distribution{ -1.0f, 1.0f }; + library->fill(tensor, distribution, 0); + } + else + { + std::uniform_int_distribution<> distribution{ 0, 255 }; + library->fill(tensor, distribution, 0); + } } - TensorType compute_target(TensorShape shape, DataType dt, bool in_place, float epsilon) + TensorType compute_target(TensorShape shape, DataType dt, bool in_place, float epsilon, QuantizationInfo qi) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, dt, 1); - TensorType dst; + TensorType src = create_tensor<TensorType>(shape, dt, 1, qi); + TensorType dst = create_tensor<TensorType>(shape, dt, 1, qi); TensorType *dst_ptr = in_place ? 
&src : &dst; @@ -75,17 +80,17 @@ protected: FunctionType norm; norm.configure(&src, dst_ptr, epsilon); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); if(!in_place) { dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); } // Fill tensors @@ -104,10 +109,10 @@ protected: } } - SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt, float epsilon) + SimpleTensor<T> compute_reference(const TensorShape &shape, DataType dt, float epsilon, QuantizationInfo qi) { // Create reference - SimpleTensor<T> ref_src{ shape, dt, 1 }; + SimpleTensor<T> ref_src{ shape, dt, 1, qi }; // Fill reference fill(ref_src); @@ -119,6 +124,7 @@ protected: SimpleTensor<T> _reference{}; DataType _data_type{}; }; + } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/fixtures/NonMaxSuppressionFixture.h b/tests/validation/fixtures/NonMaxSuppressionFixture.h index 6d5fc437ce..043b4731aa 100644 --- a/tests/validation/fixtures/NonMaxSuppressionFixture.h +++ b/tests/validation/fixtures/NonMaxSuppressionFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType> class NMSValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, unsigned int max_output_size, float score_threshold, float nms_threshold) { ARM_COMPUTE_ERROR_ON(max_output_size == 0); @@ -77,18 +76,18 @@ protected: FunctionType nms_func; nms_func.configure(&bboxes, &scores, &indices, max_output_size, score_threshold, nms_threshold); - ARM_COMPUTE_EXPECT(bboxes.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(indices.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(scores.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(bboxes.info()->is_resizable()); + ARM_COMPUTE_ASSERT(indices.info()->is_resizable()); + ARM_COMPUTE_ASSERT(scores.info()->is_resizable()); // Allocate tensors bboxes.allocator()->allocate(); indices.allocator()->allocate(); scores.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!bboxes.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!indices.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!scores.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!bboxes.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!indices.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!scores.info()->is_resizable()); // Fill tensors fill(AccessorType(bboxes), 0, 0.f, 1.f); diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h index 54570de64f..ddaa3533f5 100644 --- a/tests/validation/fixtures/NormalizationLayerFixture.h +++ b/tests/validation/fixtures/NormalizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. 
+ * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class NormalizationValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, DataLayout data_layout) { NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled); @@ -81,15 +80,15 @@ protected: FunctionType norm_layer; norm_layer.configure(&src, &dst, info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -119,7 +118,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class NormalizationValidationFixture : public NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, DataLayout data_layout) { NormalizationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, norm_type, norm_size, beta, is_scaled, data_type, data_layout); diff --git a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h index 3249ccc4bb..5f2c865950 100644 --- a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h +++ b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class NormalizePlanarYUVLayerValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout, QuantizationInfo quantization_info) { _data_type = dt; @@ -97,10 +96,10 @@ protected: FunctionType norm; norm.configure(&src, &dst, &mean, &std); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(std.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(std.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -108,10 +107,10 @@ protected: mean.allocator()->allocate(); std.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!std.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!mean.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!std.info()->is_resizable()); // Fill tensors fill(AccessorType(src), AccessorType(mean), AccessorType(std)); @@ -144,7 +143,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class NormalizePlanarYUVLayerValidationFixture : public NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout) { NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, dt, data_layout, QuantizationInfo()); @@ -155,7 +153,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class NormalizePlanarYUVLayerValidationQuantizedFixture : public NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape0, TensorShape shape1, DataType dt, DataLayout data_layout, QuantizationInfo quantization_info) { NormalizePlanarYUVLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, dt, data_layout, quantization_info); diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h index 2279c8b2b3..93b43616ff 100644 --- a/tests/validation/fixtures/PadLayerFixture.h +++ b/tests/validation/fixtures/PadLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PaddingFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, const PaddingList &padding, const PaddingMode mode) { PaddingList clamped_padding = padding; @@ -94,15 +93,15 @@ protected: FunctionType padding; padding.configure(&src, &dst, paddings, const_value, mode); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/PermuteFixture.h b/tests/validation/fixtures/PermuteFixture.h index 9bbc0cba5e..b1b3845a8d 100644 --- a/tests/validation/fixtures/PermuteFixture.h +++ b/tests/validation/fixtures/PermuteFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PermuteValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, PermutationVector perm, DataType data_type) { _target = compute_target(input_shape, data_type, perm); @@ -73,15 +72,15 @@ protected: FunctionType perm_func; perm_func.configure(&src, &dst, perm); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h index 4eb83859ac..4345d8a13f 100644 --- a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h +++ b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationGenericValidationFixture : public framework::Fixture { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, @@ -76,24 +75,45 @@ protected: QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info) { // Create tensors - TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0); - TensorType src2 = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1); - TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), dt_out, 1, qinfo_out); + const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1); + TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0); + TensorType src2 = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1); + TensorType dst = create_tensor<TensorType>(out_shape, dt_out, 1, qinfo_out); + + // Check whether to compute in place and whether the inputs are broadcast compatible + TensorType *actual_dst = &dst; + if(_is_inplace) + { + bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out) && (dt_in1 == dt_out); + bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out) && (dt_in2 == dt_out); + bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace); + ARM_COMPUTE_ASSERT(do_in_place); + + if(src1_is_inplace) + { + actual_dst = &src1; + } + else + { + actual_dst = &src2; + } + } auto allocate_tensor = [](TensorType & t) { - ARM_COMPUTE_EXPECT(t.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(t.info()->is_resizable()); t.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!t.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!t.info()->is_resizable()); }; // Create and configure function FunctionType multiply; - multiply.configure(&src1, &src2, (_is_inplace ? &src1 : &dst), scale, convert_policy, rounding_policy, act_info); + multiply.configure(&src1, &src2, actual_dst, scale, convert_policy, rounding_policy, act_info); allocate_tensor(src1); allocate_tensor(src2); + // If not computing in place, the original dst still needs to be allocated if(!_is_inplace) { allocate_tensor(dst); @@ -106,12 +126,7 @@ protected: // Compute function multiply.run(); - if(_is_inplace) - { - return src1; - } - - return dst; + return std::move(*actual_dst); } SimpleTensor<T3> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, @@ -122,16 +137,12 @@ protected: SimpleTensor<T1> src1{ shape0, dt_in1, 1, qinfo0 }; SimpleTensor<T2> src2{ shape1, dt_in2, 1, qinfo1 }; - // current in-place implementation only supports same metadata of input and output tensors. - // By ignoring output quantization information here, we can make test cases implementation much simpler. - QuantizationInfo output_qinfo = _is_inplace ? qinfo0 : qinfo_out; - // Fill reference fill(src1, 0); fill(src2, 1); - auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, output_qinfo); - return act_info.enabled() ?
reference::activation_layer(result, act_info, output_qinfo) : result; + auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy, rounding_policy, dt_out, qinfo_out); + return act_info.enabled() ? reference::activation_layer(result, act_info, qinfo_out) : result; } TensorType _target{}; @@ -143,7 +154,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3> { public: - template <typename...> void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, bool is_inplace) { PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, @@ -155,7 +165,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationBroadcastValidationFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, bool is_inplace) { @@ -168,7 +177,17 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> + void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace) + { + PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy, + QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), act_info, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> +class PixelWiseMultiplicationValidationIntegerFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2> +{ +public: void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace) { PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, shape, dt_in1, dt_in2, dt_in2, scale, convert_policy, rounding_policy, @@ -180,7 +199,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationBroadcastValidationFloatFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, ActivationLayerInfo act_info, bool is_inplace) { @@ -193,7 +211,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class 
PixelWiseMultiplicationValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3> { public: - template <typename...> void setup(const TensorShape &shape, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { @@ -206,7 +223,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PixelWiseMultiplicationBroadcastValidationQuantizedFixture : public PixelWiseMultiplicationGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3> { public: - template <typename...> void setup(const TensorShape &shape0, const TensorShape &shape1, DataType dt_in1, DataType dt_in2, DataType dt_out, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace) { diff --git a/tests/validation/fixtures/Pooling3dLayerFixture.h b/tests/validation/fixtures/Pooling3dLayerFixture.h new file mode 100644 index 0000000000..1bdf615fb1 --- /dev/null +++ b/tests/validation/fixtures/Pooling3dLayerFixture.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2022-2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE +#define ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/runtime/Tensor.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/Pooling3dLayer.h" +#include <random> +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class Pooling3dLayerValidationGenericFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape, Pooling3dLayerInfo pool_info, DataType data_type, QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) + { + _target = compute_target(shape, pool_info, data_type, input_qinfo, output_qinfo); + _reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo); + } + +protected: + template <typename U> + void fill(U &&tensor) + { + if(tensor.data_type() == DataType::F32) + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, 0); + } + else if(tensor.data_type() == DataType::F16) + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f }; + library->fill(tensor, distribution, 0); + } + else // data type is quantized_asymmetric + { + library->fill_tensor_uniform(tensor, 0); + } + } + + TensorType compute_target(TensorShape shape, Pooling3dLayerInfo info, + DataType data_type, QuantizationInfo input_qinfo, QuantizationInfo output_qinfo) + { + // Create tensors + TensorType src = create_tensor<TensorType>(shape, data_type, 1, input_qinfo, DataLayout::NDHWC); + const TensorShape dst_shape = misc::shape_calculator::compute_pool3d_shape((src.info()->tensor_shape()), info); + TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, DataLayout::NDHWC); + + // Create and configure function + FunctionType pool_layer; + pool_layer.validate(src.info(), dst.info(), info); + pool_layer.configure(&src, &dst, info); + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(src)); + + // Compute function + pool_layer.run(); + return dst; + } + + SimpleTensor<T> compute_reference(TensorShape shape, Pooling3dLayerInfo info, DataType data_type, QuantizationInfo input_qinfo, QuantizationInfo output_qinfo) + { + // Create reference + SimpleTensor<T> src(shape, data_type, 1, input_qinfo, DataLayout::NDHWC); + // Fill reference + fill(src); + return reference::pooling_3d_layer<T>(src, info, output_qinfo); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class Pooling3dLayerValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, PoolingType pool_type, Size3D pool_size, Size3D stride, Padding3D padding, bool exclude_padding, DataType data_type) + { + 
Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type, pool_size, stride, padding, exclude_padding), + data_type); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class Pooling3dLayerValidationQuantizedFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, PoolingType pool_type, Size3D pool_size, Size3D stride, Padding3D padding, bool exclude_padding, DataType data_type, + QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) + { + Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type, pool_size, stride, padding, exclude_padding), + data_type, input_qinfo, output_qinfo); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class Pooling3dLayerGlobalValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, PoolingType pool_type, DataType data_type) + { + Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, Pooling3dLayerInfo(pool_type), data_type); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class SpecialPooling3dLayerValidationFixture : public Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape src_shape, Pooling3dLayerInfo pool_info, DataType data_type) + { + Pooling3dLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE */ diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index af078d4ce3..59c920868b 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,16 +45,31 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PoolingLayerValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false, - QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) + QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false) { - _pool_info = pool_info; - _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); - _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); + _mixed_layout = mixed_layout; + _pool_info = pool_info; + _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); + _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test multi-DataLayout graph cases, where the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute the pooling function + layer.run(); + + // Reinstate the original data layout so the test suite can properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template <typename U> void fill(U &&tensor) { @@ -94,25 +109,33 @@ protected: FunctionType pool_layer; pool_layer.configure(&src, &dst, info, (indices) ?
&_target_indices : nullptr); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(_target_indices.info()->is_resizable()); + + add_padding_x({ &src, &dst, &_target_indices }, data_layout); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); _target_indices.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!_target_indices.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); - // Compute function - pool_layer.run(); - + if(_mixed_layout) + { + mix_layout(pool_layer, src, dst); + } + else + { + // Compute function + pool_layer.run(); + } return dst; } @@ -129,6 +152,7 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; PoolingLayerInfo _pool_info{}; + bool _mixed_layout{ false }; TensorType _target_indices{}; SimpleTensor<uint32_t> _ref_indices{}; }; @@ -136,23 +160,22 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PoolingLayerIndicesValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> - void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout) + void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout, bool use_kernel_indices) { - PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding), + PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding, false, + true, use_kernel_indices), data_type, data_layout, true); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding), - data_type, data_layout); + data_type, data_layout, false, mixed_layout); } }; @@ -160,7 +183,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PoolingLayerValidationMixedPrecisionFixture : public 
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout, bool fp_mixed_precision = false) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding, fp_mixed_precision), @@ -168,16 +190,15 @@ public: } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout = DataLayout::NCHW, QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding), - data_type, data_layout, false, input_qinfo, output_qinfo); + data_type, data_layout, false, input_qinfo, output_qinfo, mixed_layout); } }; @@ -185,10 +206,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SpecialPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape src_shape, PoolingLayerInfo pool_info, DataType data_type) { - PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, DataLayout::NCHW); + PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, pool_info.data_layout); } }; @@ -196,7 +216,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, PoolingType pool_type, DataType data_type, DataLayout data_layout = DataLayout::NCHW) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, data_layout), data_type, data_layout); diff --git a/tests/validation/fixtures/PriorBoxLayerFixture.h b/tests/validation/fixtures/PriorBoxLayerFixture.h index ef18c0d787..0a76cfd155 100644 --- a/tests/validation/fixtures/PriorBoxLayerFixture.h +++ b/tests/validation/fixtures/PriorBoxLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class PriorBoxLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, PriorBoxLayerInfo info, DataType data_type, DataLayout data_layout) { TensorInfo input_info(input_shape, 1, data_type); @@ -73,18 +72,18 @@ protected: FunctionType prior_box; prior_box.configure(&src1, &src2, &dst, info); - ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src1.allocator()->allocate(); src2.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src1.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!src2.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src1.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!src2.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Compute function prior_box.run(); diff --git a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h index 0cf2ef04f7..e864b4affe 100644 --- a/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h +++ b/tests/validation/fixtures/QLSTMLayerNormalizationFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2020-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class QLSTMLayerNormalizationValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weight_shape, TensorShape bias_shape, DataType data_type, QuantizationInfo weight_qinfo) { ARM_COMPUTE_ERROR_ON(data_type != DataType::QSYMM16); @@ -91,9 +90,9 @@ protected: { for(auto t : tensors) { - ARM_COMPUTE_EXPECT(t->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(t->info()->is_resizable()); t->allocator()->allocate(); - ARM_COMPUTE_EXPECT(!t->info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!t->info()->is_resizable()); } } diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h index 4f46f99ff5..1b21967bda 100644 --- a/tests/validation/fixtures/QuantizationLayerFixture.h +++ b/tests/validation/fixtures/QuantizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class QuantizationValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in) { _target = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in); @@ -70,15 +69,15 @@ protected: FunctionType quantization_layer; quantization_layer.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -108,7 +107,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class QuantizationValidationFixture : public QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout> { public: - template <typename...> void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo) { QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>::setup(shape, data_type_in, data_type_out, qinfo, QuantizationInfo()); diff --git a/tests/validation/fixtures/RNNLayerFixture.h b/tests/validation/fixtures/RNNLayerFixture.h index 394d91cd6f..e9a05e7838 100644 --- a/tests/validation/fixtures/RNNLayerFixture.h +++ b/tests/validation/fixtures/RNNLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -42,7 +42,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class RNNLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape recurrent_weights_shape, TensorShape bias_shape, TensorShape output_shape, ActivationLayerInfo info, DataType data_type) { @@ -76,12 +75,12 @@ protected: FunctionType rnn; rnn.configure(&input, &weights, &recurrent_weights, &bias, &hidden_state, &output, info); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(recurrent_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(hidden_state.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(recurrent_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(hidden_state.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); @@ -91,12 +90,12 @@ protected: hidden_state.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!recurrent_weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!hidden_state.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!recurrent_weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!hidden_state.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); diff --git a/tests/validation/fixtures/ROIAlignLayerFixture.h b/tests/validation/fixtures/ROIAlignLayerFixture.h index c631c24cff..ad76dcbbd9 100644 --- a/tests/validation/fixtures/ROIAlignLayerFixture.h +++ b/tests/validation/fixtures/ROIAlignLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ROIAlignLayerGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo) { _rois_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::QASYMM16 : data_type; @@ -138,18 +137,18 @@ protected: FunctionType roi_align_layer; roi_align_layer.configure(&src, &rois_tensor, &dst, pool_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rois_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); rois_tensor.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rois_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -189,7 +188,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ROIAlignLayerFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois> { public: - template <typename...> void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout) { ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois>::setup(input_shape, pool_info, rois_shape, data_type, data_layout, @@ -201,7 +199,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ROIAlignLayerQuantizedFixture : public ROIAlignLayerGenericFixture<TensorType, AccessorType, FunctionType, T, TRois> { public: - template <typename...> void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo) { diff --git a/tests/validation/fixtures/ROIPoolingLayerFixture.h b/tests/validation/fixtures/ROIPoolingLayerFixture.h new file mode 100644 index 0000000000..4b46a6176d --- /dev/null +++ b/tests/validation/fixtures/ROIPoolingLayerFixture.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2021, 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE +#define ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/ROIPoolingLayer.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ROIPoolingLayerGenericFixture : public framework::Fixture +{ +public: + void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo) + { + _target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo); + _reference = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo); + } + +protected: + template <typename U> + void fill(U &&tensor) + { + library->fill_tensor_uniform(tensor, 0); + } + + template <typename U> + void generate_rois(U &&rois, const TensorShape &shape, const ROIPoolingLayerInfo &pool_info, TensorShape rois_shape, DataLayout data_layout = DataLayout::NCHW) + { + const size_t values_per_roi = rois_shape.x(); + const size_t num_rois = rois_shape.y(); + + std::mt19937 gen(library->seed()); + uint16_t *rois_ptr = static_cast<uint16_t *>(rois.data()); + + const float pool_width = pool_info.pooled_width(); + const float pool_height = pool_info.pooled_height(); + const float roi_scale = pool_info.spatial_scale(); + + // Calculate distribution bounds + const auto scaled_width = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] / roi_scale) / pool_width); + const auto scaled_height = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] / roi_scale) / pool_height); + const auto min_width = static_cast<float>(pool_width / roi_scale); + const auto min_height = static_cast<float>(pool_height / roi_scale); + + // Create distributions + std::uniform_int_distribution<int> dist_batch(0, shape[3] - 1); + std::uniform_int_distribution<> dist_x1(0, scaled_width); + std::uniform_int_distribution<> dist_y1(0, scaled_height); + std::uniform_int_distribution<> dist_w(min_width, std::max(float(min_width), (pool_width - 2) * scaled_width)); + std::uniform_int_distribution<> dist_h(min_height, std::max(float(min_height), (pool_height - 2) * scaled_height)); + + for(unsigned int pw = 0; pw < num_rois; ++pw) + { + const auto batch_idx = dist_batch(gen); + const auto x1 = dist_x1(gen); + const auto y1 = dist_y1(gen); + const auto x2 = x1 + dist_w(gen); + const auto y2 = y1 + dist_h(gen); + + rois_ptr[values_per_roi * pw] = batch_idx; + rois_ptr[values_per_roi * pw + 1] = static_cast<uint16_t>(x1); + rois_ptr[values_per_roi * pw + 2] = static_cast<uint16_t>(y1); + rois_ptr[values_per_roi * pw + 3] = static_cast<uint16_t>(x2); + rois_ptr[values_per_roi * pw + 4] = static_cast<uint16_t>(y2); + } + } + + TensorType compute_target(TensorShape input_shape, + DataType data_type, + DataLayout data_layout, + const ROIPoolingLayerInfo &pool_info, + const TensorShape rois_shape, + const QuantizationInfo &qinfo, + const QuantizationInfo 
&output_qinfo) + { + const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo(); + + // Create tensors + TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, qinfo, data_layout); + TensorType rois_tensor = create_tensor<TensorType>(rois_shape, _rois_data_type, 1, rois_qinfo); + + // Initialise shape and declare output tensor dst + const TensorShape dst_shape; + TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout); + + // Create and configure function + FunctionType roi_pool_layer; + roi_pool_layer.configure(&src, &rois_tensor, &dst, pool_info); + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(rois_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + src.allocator()->allocate(); + rois_tensor.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!rois_tensor.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(src)); + generate_rois(AccessorType(rois_tensor), input_shape, pool_info, rois_shape, data_layout); + + // Compute function + roi_pool_layer.run(); + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, + DataType data_type, + const ROIPoolingLayerInfo &pool_info, + const TensorShape rois_shape, + const QuantizationInfo &qinfo, + const QuantizationInfo &output_qinfo) + { + // Create reference tensor + SimpleTensor<T> src{ input_shape, data_type, 1, qinfo }; + const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo(); + SimpleTensor<uint16_t> rois_tensor{ rois_shape, _rois_data_type, 1, rois_qinfo }; + + // Fill reference tensor + fill(src); + generate_rois(rois_tensor, input_shape, pool_info, rois_shape); + + return reference::roi_pool_layer(src, rois_tensor, pool_info, output_qinfo); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + const DataType _rois_data_type{ DataType::U16 }; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ROIPoolingLayerQuantizedFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, + DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo) + { + ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape, + data_type, data_layout, qinfo, output_qinfo); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ROIPoolingLayerFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout) + { + ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape, data_type, data_layout, + QuantizationInfo(), QuantizationInfo()); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif /* ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE */
\ No newline at end of file diff --git a/tests/validation/fixtures/RangeFixture.h b/tests/validation/fixtures/RangeFixture.h index 0713db9237..166613a318 100644 --- a/tests/validation/fixtures/RangeFixture.h +++ b/tests/validation/fixtures/RangeFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -55,7 +55,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class RangeFixture : public framework::Fixture { public: - template <typename...> void setup(const DataType data_type0, float start, float step, const QuantizationInfo qinfo0 = QuantizationInfo()) { _target = compute_target(data_type0, qinfo0, start, step); @@ -113,11 +112,11 @@ protected: FunctionType range_func; range_func.configure(&dst, start, end, step); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Compute function range_func.run(); diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h index 36bf14b27e..e61941435c 100644 --- a/tests/validation/fixtures/ReduceMeanFixture.h +++ b/tests/validation/fixtures/ReduceMeanFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReduceMeanValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output) { _target = compute_target(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output); @@ -92,15 +91,15 @@ protected: FunctionType reduction_mean; reduction_mean.configure(&src, axis, keep_dims, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -124,7 +123,13 @@ protected: { TensorShape output_shape = i == 0 ? src_shape : out.shape(); output_shape.set(axis[i], 1); - out = reference::reduction_operation<T, T>(i == 0 ? src : out, output_shape, axis[i], ReductionOperation::MEAN_SUM, quantization_info_output); + bool is_opencl = false; + +#ifdef ARM_COMPUTE_OPENCL_ENABLED + is_opencl = std::is_same<CLTensor, TensorType>::value; // Round down to zero on opencl to match kernel +#endif /* ARM_COMPUTE_OPENCL_ENABLED */ + out = reference::reduction_operation<T, T>(i == 0 ? src : out, output_shape, axis[i], ReductionOperation::MEAN_SUM, data_type, quantization_info_output, + is_opencl ? 
RoundingPolicy::TO_ZERO : RoundingPolicy::TO_NEAREST_UP); } if(!keep_dims) @@ -133,7 +138,7 @@ protected: std::sort(axis.begin(), axis.begin() + axis.num_dimensions()); for(unsigned int i = 0; i < axis.num_dimensions(); ++i) { - output_shape.remove_dimension(axis[i] - i); + output_shape.remove_dimension(axis[i] - i, false); } out = reference::reshape_layer(out, output_shape); @@ -149,7 +154,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReduceMeanQuantizedFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output) { ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output); @@ -160,7 +164,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReduceMeanFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims) { ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, QuantizationInfo(), QuantizationInfo()); diff --git a/tests/validation/fixtures/ReductionOperationFixture.h b/tests/validation/fixtures/ReductionOperationFixture.h index f3d653e6dc..b44f299486 100644 --- a/tests/validation/fixtures/ReductionOperationFixture.h +++ b/tests/validation/fixtures/ReductionOperationFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -45,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReductionOperationValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info, bool keep_dims = false) { const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN); @@ -76,14 +75,14 @@ protected: if(tensor.data_type() == DataType::QASYMM8) { std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, 0); } else if(tensor.data_type() == DataType::QASYMM8_SIGNED) { std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second); + std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, 0); } @@ -108,15 +107,15 @@ protected: FunctionType reduction_func; reduction_func.configure(&src, &dst, axis, op, _keep_dims); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -135,7 +134,7 @@ protected: // Fill reference fill(src); - return reference::reduction_operation<T, T>(src, dst_shape, axis, op, quantization_info); + return reference::reduction_operation<T, T>(src, dst_shape, axis, op, data_type, quantization_info); } TensorType _target{}; @@ -149,7 +148,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReductionOperationQuantizedFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info = QuantizationInfo(), bool keep_dims = false) { ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, quantization_info, keep_dims); @@ -160,7 +158,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReductionOperationFixture : public ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, bool keep_dims = false) { ReductionOperationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, op, QuantizationInfo(), keep_dims); diff --git a/tests/validation/fixtures/RemapFixture.h b/tests/validation/fixtures/RemapFixture.h deleted file mode 100644 index e851cdb4df..0000000000 
--- a/tests/validation/fixtures/RemapFixture.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2017-2018 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_TEST_REMAP_FIXTURE -#define ARM_COMPUTE_TEST_REMAP_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Fixture.h" -#include "tests/validation/reference/Remap.h" - -#include <random> - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class RemapValidationFixture : public framework::Fixture -{ -public: - template <typename...> - void setup(TensorShape shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode) - { - std::mt19937 gen(library->seed()); - std::uniform_int_distribution<uint8_t> distribution(0, 255); - const T constant_border_value = static_cast<T>(distribution(gen)); - - _target = compute_target(shape, policy, data_type, border_mode, constant_border_value); - _reference = compute_reference(shape, policy, data_type, border_mode, constant_border_value); - } - -protected: - template <typename U> - void fill(U &&tensor, int i, float min, float max) - { - std::uniform_int_distribution<> distribution((int)min, (int)max); - library->fill(tensor, distribution, i); - } - - TensorType compute_target(const TensorShape &shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode, T constant_border_value) - { - // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type); - TensorType map_x = create_tensor<TensorType>(shape, DataType::F32); - TensorType map_y = create_tensor<TensorType>(shape, DataType::F32); - TensorType dst = create_tensor<TensorType>(shape, data_type); - - // Create and configure function - FunctionType remap; - remap.configure(&src, &map_x, &map_y, &dst, policy, border_mode, constant_border_value); - - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(map_x.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(map_y.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - 
src.allocator()->allocate(); - map_x.allocator()->allocate(); - map_y.allocator()->allocate(); - dst.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!map_x.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!map_y.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - fill(AccessorType(src), 0, 0, 255); - fill(AccessorType(map_x), 1, -5, shape.x() + 5); - fill(AccessorType(map_y), 2, -5, shape.y() + 5); - - // Compute function - remap.run(); - - return dst; - } - - SimpleTensor<T> compute_reference(const TensorShape &shape, InterpolationPolicy policy, DataType data_type, BorderMode border_mode, T constant_border_value) - { - ARM_COMPUTE_ERROR_ON(data_type != DataType::U8); - - // Create reference - SimpleTensor<T> src{ shape, data_type }; - SimpleTensor<float> map_x{ shape, DataType::F32 }; - SimpleTensor<float> map_y{ shape, DataType::F32 }; - - // Create the valid mask Tensor - _valid_mask = SimpleTensor<T> { shape, data_type }; - - // Fill reference - fill(src, 0, 0, 255); - fill(map_x, 1, -5, shape.x() + 5); - fill(map_y, 2, -5, shape.y() + 5); - - // Compute reference - return reference::remap<T>(src, map_x, map_y, _valid_mask, policy, border_mode, constant_border_value); - } - - TensorType _target{}; - SimpleTensor<T> _reference{}; - SimpleTensor<T> _valid_mask{}; -}; -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_REMAP_FIXTURE */ diff --git a/tests/validation/fixtures/ReorderFixture.h b/tests/validation/fixtures/ReorderFixture.h new file mode 100644 index 0000000000..8e28484c48 --- /dev/null +++ b/tests/validation/fixtures/ReorderFixture.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/Reorder.h" +#include "src/core/NEON/kernels/arm_gemm/utils.hpp" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** [ReorderLayer fixture] **/ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ReorderValidationFixture : public framework::Fixture +{ +public: + void check_hardware_supports(WeightFormat output_wf){ + if(!Scheduler::get().cpu_info().has_sve() && output_wf!=WeightFormat::OHWIo4){ + _hardware_supports = false; + } + if (Scheduler::get().cpu_info().has_sve() && arm_gemm::utils::get_vector_length<float>() != 8 && output_wf==WeightFormat::OHWIo8) + { + _hardware_supports = false; + } + } + + void setup(TensorShape input_shape, TensorShape output_shape, WeightFormat input_wf, WeightFormat output_wf, DataType data_type) + { + check_hardware_supports(output_wf); + if (_hardware_supports){ + _target = compute_target(input_shape, output_shape, input_wf, output_wf, data_type); + _reference = compute_reference(input_shape, output_shape, output_wf, data_type); + } + } + + protected: + template <typename U> + void fill(U &&tensor) + { + library->fill_tensor_uniform(tensor, 0); + } + + TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, WeightFormat input_wf, WeightFormat output_wf, DataType data_type) + { + // Create tensors + TensorType src = create_tensor<TensorType>(input_shape, data_type); + TensorType dst = create_tensor<TensorType>(output_shape, data_type); + + // Create and configure function + FunctionType reorder; + + reorder.configure(&src, &dst, input_wf, output_wf); + + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + // Allocate tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + + // Fill tensors + fill(AccessorType(src)); + + // Compute function + reorder.run(); + + return dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, WeightFormat output_wf, DataType data_type) + { + // Create reference + SimpleTensor<T> src{ input_shape, data_type }; + + // Fill reference + fill(src); + + return reference::reorder_layer<T>(src, output_shape, output_wf); + } + + bool _hardware_supports = true; + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; +/** [ReorderLayer fixture] **/ +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_REORDERFIXTURE_H diff --git a/tests/validation/fixtures/ReorgLayerFixture.h b/tests/validation/fixtures/ReorgLayerFixture.h index 630802214a..f87017190e 100644 --- a/tests/validation/fixtures/ReorgLayerFixture.h +++ b/tests/validation/fixtures/ReorgLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
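
The check_hardware_supports() gate in the ReorderValidationFixture above encodes two constraints: without SVE only the OHWIo4 output format is exercised, and OHWIo8 additionally requires a 512-bit SVE vector length (eight floats). A minimal stand-alone sketch of that decision table; the stub enum and the plain bool/unsigned inputs stand in for ACL's WeightFormat, Scheduler::get().cpu_info().has_sve() and arm_gemm::utils::get_vector_length<float>():

    #include <cstdio>

    // Stub of the two formats the fixture distinguishes (not ACL's real enum).
    enum class WeightFormat { OHWIo4, OHWIo8 };

    // Mirrors the fixture's gate: returns false exactly where the fixture
    // would set _hardware_supports = false.
    bool reorder_supported(bool has_sve, unsigned vector_len_f32, WeightFormat output_wf)
    {
        if(!has_sve && output_wf != WeightFormat::OHWIo4)
            return false; // without SVE, only OHWIo4 is reorderable
        if(has_sve && vector_len_f32 != 8 && output_wf == WeightFormat::OHWIo8)
            return false; // OHWIo8 needs a 512-bit (8-float) SVE vector
        return true;
    }

    int main()
    {
        std::printf("%d\n", reorder_supported(false, 4, WeightFormat::OHWIo8)); // 0
        std::printf("%d\n", reorder_supported(true, 8, WeightFormat::OHWIo8));  // 1
    }
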
* * SPDX-License-Identifier: MIT * @@ -43,7 +43,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReorgLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, int32_t stride, DataType data_type, DataLayout data_layout) { _target = compute_target(input_shape, stride, data_type, data_layout); @@ -74,15 +73,15 @@ protected: reorg.configure(&src, &dst, stride); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/ReshapeLayerFixture.h b/tests/validation/fixtures/ReshapeLayerFixture.h index a89a94741f..5be431f8cf 100644 --- a/tests/validation/fixtures/ReshapeLayerFixture.h +++ b/tests/validation/fixtures/ReshapeLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -31,6 +31,7 @@ #include "tests/IAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" #include "tests/validation/reference/ReshapeLayer.h" namespace arm_compute @@ -41,13 +42,12 @@ namespace validation { /** [ReshapeLayer fixture] **/ template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class ReshapeLayerValidationFixture : public framework::Fixture +class ReshapeLayerGenericValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type) + void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type, bool add_x_padding = false) { - _target = compute_target(input_shape, output_shape, data_type); + _target = compute_target(input_shape, output_shape, data_type, add_x_padding); _reference = compute_reference(input_shape, output_shape, data_type); } @@ -58,10 +58,10 @@ protected: library->fill_tensor_uniform(tensor, i); } - TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type) + TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type, bool add_x_padding = false) { // Check that the input shape can indeed be reshaped to the output one - ARM_COMPUTE_EXPECT(input_shape.total_size() == output_shape.total_size(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size()); // Create tensors TensorType src = create_tensor<TensorType>(input_shape,
data_type); @@ -72,15 +72,21 @@ protected: reshape.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + if(add_x_padding) + { + // Add random padding in x dimension + add_padding_x({ &src, &dst }); + } // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); @@ -105,8 +111,27 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; }; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ReshapeLayerValidationFixture : public ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type) + { + ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, output_shape, data_type); + } +}; +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ReshapeLayerPaddedValidationFixture : public ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type) + { + ReshapeLayerGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, output_shape, data_type, true /* add_x_padding */); + } +}; /** [ReshapeLayer fixture] **/ } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_RESHAPE_LAYER_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_RESHAPELAYERFIXTURE_H diff --git a/tests/validation/fixtures/ReverseFixture.h b/tests/validation/fixtures/ReverseFixture.h index 4982cae578..856bff7b12 100644 --- a/tests/validation/fixtures/ReverseFixture.h +++ b/tests/validation/fixtures/ReverseFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_REVERSE_FIXTURE -#define ARM_COMPUTE_TEST_REVERSE_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H #include "arm_compute/core/Helpers.h" #include "arm_compute/core/TensorShape.h" @@ -45,11 +45,11 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ReverseValidationFixture : public framework::Fixture { public: - template <typename...> - void setup(TensorShape shape, TensorShape axis_shape, DataType data_type) + void setup(TensorShape shape, TensorShape axis_shape, DataType data_type, bool use_negative_axis = false, bool use_inverted_axis = false) { - _target = compute_target(shape, axis_shape, data_type); - _reference = compute_reference(shape, axis_shape, data_type); + _num_dims = shape.num_dimensions(); + _target = compute_target(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis); + _reference = compute_reference(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis); } protected: @@ -58,16 +58,25 @@ protected: { library->fill_tensor_uniform(tensor, 0); } - std::vector<int> generate_random_axis() + std::vector<int32_t> generate_random_axis(bool use_negative = false) { - std::vector<int> axis_v = { 0, 1, 2, 3 }; - std::mt19937 g(0); + std::vector<int32_t> axis_v; + if(use_negative) + { + axis_v = { -1, -2, -3, -4 }; + } + else + { + axis_v = { 0, 1, 2, 3 }; + } + axis_v = std::vector<int32_t>(axis_v.begin(), axis_v.begin() + _num_dims); + std::mt19937 g(library->seed()); std::shuffle(axis_v.begin(), axis_v.end(), g); return axis_v; } - TensorType compute_target(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type) + TensorType compute_target(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type, bool use_negative_axis, bool use_inverted_axis = false) { // Create tensors TensorType src = create_tensor<TensorType>(shape, data_type, 1); @@ -76,27 +85,27 @@ protected: // Create and configure function FunctionType reverse_func; - reverse_func.configure(&src, &dst, &axis); + reverse_func.configure(&src, &dst, &axis, use_inverted_axis); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(axis.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(axis.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); axis.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!axis.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!axis.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); { auto axis_data = AccessorType(axis); - auto axis_v = generate_random_axis(); - std::copy(axis_v.begin(), axis_v.begin() + axis_shape.x(), static_cast<int32_t *>(axis_data.data())); + auto axis_v = generate_random_axis(use_negative_axis); + std::copy(axis_v.begin(), axis_v.begin() + axis_shape.total_size(), static_cast<int32_t *>(axis_data.data())); } // Compute function @@ -105,24 +114,25 @@ protected: return dst; } - SimpleTensor<T> 
compute_reference(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type) + SimpleTensor<T> compute_reference(const TensorShape &shape, const TensorShape &axis_shape, DataType data_type, bool use_negative_axis, bool use_inverted_axis = false) { // Create reference - SimpleTensor<T> src{ shape, data_type }; - SimpleTensor<uint32_t> axis{ axis_shape, DataType::U32 }; + SimpleTensor<T> src{ shape, data_type }; + SimpleTensor<int32_t> axis{ axis_shape, DataType::S32 }; // Fill reference fill(src); - auto axis_v = generate_random_axis(); - std::copy(axis_v.begin(), axis_v.begin() + axis_shape.x(), axis.data()); + auto axis_v = generate_random_axis(use_negative_axis); + std::copy(axis_v.begin(), axis_v.begin() + axis_shape.total_size(), axis.data()); - return reference::reverse<T>(src, axis); + return reference::reverse<T>(src, axis, use_inverted_axis); } TensorType _target{}; SimpleTensor<T> _reference{}; + unsigned int _num_dims{}; }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_REVERSE_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_REVERSEFIXTURE_H diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h index dd521470e6..86d89d71f7 100644 --- a/tests/validation/fixtures/ScaleFixture.h +++ b/tests/validation/fixtures/ScaleFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,15 +21,10 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_SCALE_FIXTURE -#define ARM_COMPUTE_TEST_SCALE_FIXTURE - -#include "arm_compute/core/TensorShape.h" -#include "arm_compute/core/Types.h" -#include "tests/AssetsLibrary.h" -#include "tests/Globals.h" -#include "tests/IAccessor.h" -#include "tests/framework/Asserts.h" +#ifndef ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H + +#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT #include "tests/framework/Fixture.h" #include "tests/validation/reference/Permute.h" #include "tests/validation/reference/Scale.h" @@ -44,22 +39,23 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ScaleValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, - bool align_corners) + bool align_corners, bool mixed_layout, QuantizationInfo output_quantization_info) { - _shape = shape; - _policy = policy; - _border_mode = border_mode; - _sampling_policy = sampling_policy; - _data_type = data_type; - _quantization_info = quantization_info; - _align_corners = align_corners; + _shape = shape; + _policy = policy; + _border_mode = border_mode; + _sampling_policy = sampling_policy; + _data_type = data_type; + _input_quantization_info = quantization_info; + _output_quantization_info = output_quantization_info; + _align_corners = align_corners; + _mixed_layout = mixed_layout; generate_scale(shape); - std::mt19937 generator(library->seed()); - std::uniform_int_distribution<uint8_t> distribution_u8(0, 255); + std::mt19937 generator(library->seed()); + std::uniform_int_distribution<uint32_t> distribution_u8(0, 255); _constant_border_value = static_cast<T>(distribution_u8(generator)); 
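
One detail of the ScaleValidationGenericFixture setup above worth calling out: the distribution's template argument became uint32_t even though the drawn value is still narrowed to an 8-bit border value. std::uniform_int_distribution is only specified for short, int, long, long long and their unsigned counterparts, so instantiating it with uint8_t is undefined behaviour; drawing a wide integer and casting afterwards is the portable pattern. A self-contained illustration, with a fixed seed standing in for library->seed():

    #include <cstdint>
    #include <iostream>
    #include <random>

    int main()
    {
        std::mt19937 generator(1337); // stand-in for library->seed()
        // uint32_t, not uint8_t: char-sized types are not valid template
        // arguments for uniform_int_distribution per the C++ standard
        std::uniform_int_distribution<uint32_t> distribution_u8(0, 255);
        const auto border = static_cast<uint8_t>(distribution_u8(generator));
        std::cout << static_cast<int>(border) << '\n';
    }
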
_target = compute_target(shape, data_layout); @@ -67,6 +63,21 @@ public: } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test multi-DataLayout graph cases, where the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute the scale function + layer.run(); + + // Reinstate the original data layout so the test suite can properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + void generate_scale(const TensorShape &shape) { static constexpr float _min_scale{ 0.25f }; @@ -128,7 +139,7 @@ protected: } // Create tensors - TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _quantization_info, data_layout); + TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, data_layout); const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); @@ -136,40 +147,48 @@ protected: TensorShape shape_scaled(shape); shape_scaled.set(idx_width, shape[idx_width] * _scale_x, /* apply_dim_correction = */ false); shape_scaled.set(idx_height, shape[idx_height] * _scale_y, /* apply_dim_correction = */ false); - TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, _quantization_info, data_layout); + TensorType dst = create_tensor<TensorType>(shape_scaled, _data_type, 1, _output_quantization_info, data_layout); // Create and configure function FunctionType scale; scale.configure(&src, &dst, ScaleKernelInfo{ _policy, _border_mode, _constant_border_value, _sampling_policy, /* use_padding */ false, _align_corners }); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &dst }, data_layout); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); - // Compute function - scale.run(); - + if(_mixed_layout) + { + mix_layout(scale, src, dst); + } + else + { + // Compute function + scale.run(); + } return dst; } SimpleTensor<T> compute_reference(const TensorShape &shape) { // Create reference - SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info }; + SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info }; // Fill reference fill(src); - return reference::scale<T>(src, _scale_x, _scale_y, _policy, _border_mode, _constant_border_value, _sampling_policy, /* ceil_policy_scale */ false, _align_corners); + return reference::scale<T>(src, _scale_x, _scale_y, _policy, _border_mode, _constant_border_value, _sampling_policy, /* ceil_policy_scale */ false, _align_corners, _output_quantization_info); } TensorType _target{}; @@ -180,17 +199,18 @@ protected: T _constant_border_value{}; SamplingPolicy
_sampling_policy{}; DataType _data_type{}; - QuantizationInfo _quantization_info{}; + QuantizationInfo _input_quantization_info{}; + QuantizationInfo _output_quantization_info{}; bool _align_corners{ false }; + bool _mixed_layout{ false }; float _scale_x{ 1.f }; float _scale_y{ 1.f }; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class ScaleValidationQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, bool align_corners) { @@ -201,14 +221,35 @@ public: policy, border_mode, sampling_policy, - align_corners); + align_corners, + mixed_layout, + quantization_info); } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> +class ScaleValidationDifferentOutputQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, DataType data_type, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, InterpolationPolicy policy, + BorderMode border_mode, SamplingPolicy sampling_policy, + bool align_corners) + { + ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, + data_type, + input_quantization_info, + data_layout, + policy, + border_mode, + sampling_policy, + align_corners, + mixed_layout, + output_quantization_info); + } +}; +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class ScaleValidationFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, bool align_corners) { ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, @@ -218,10 +259,12 @@ public: policy, border_mode, sampling_policy, - align_corners); + align_corners, + mixed_layout, + QuantizationInfo()); } }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_SCALE_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_SCALEFIXTURE_H diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h new file mode 100644 index 0000000000..af161ef98b --- /dev/null +++ b/tests/validation/fixtures/ScatterLayerFixture.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2024 Arm Limited. 
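
The mixed_layout knob that the three Scale fixtures above pass into the generic setup() travels as a non-type template parameter, so test suites select the mixed-layout behaviour purely at type level. A stripped-down model of that forwarding pattern; all names here are illustrative, not ACL's:

    #include <iostream>

    struct GenericFixture
    {
        // Runtime flag, set once by the derived fixture's setup()
        void setup(bool mixed_layout) { _mixed = mixed_layout; }
        bool _mixed{ false };
    };

    template <bool mixed_layout = false>
    struct ValidationFixture : GenericFixture
    {
        // Compile-time choice forwarded as a runtime argument
        void setup() { GenericFixture::setup(mixed_layout); }
    };

    int main()
    {
        ValidationFixture<>     plain;
        ValidationFixture<true> mixed;
        plain.setup();
        mixed.setup();
        std::cout << plain._mixed << ' ' << mixed._mixed << '\n'; // prints: 0 1
    }
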
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H + +#include "arm_compute/core/Utils.h" +#include "arm_compute/runtime/CL/CLTensorAllocator.h" +#include "tests/Globals.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/Validation.h" +#include "tests/validation/reference/ScatterLayer.h" +#include "tests/SimpleTensor.h" + +#include <random> +#include <cstdint> + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ScatterGenericValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape src_shape, TensorShape updates_shape, TensorShape indices_shape, + TensorShape out_shape, DataType data_type, ScatterInfo scatter_info, bool inplace, bool padding, + QuantizationInfo src_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo()) + { + // This is for improving randomness across tests + _hash = src_shape[0] + src_shape[1] + src_shape[2] + src_shape[3] + src_shape[4] + src_shape[5] + + updates_shape[0] + updates_shape[1] + updates_shape[2] + updates_shape[3] + + updates_shape[4] + updates_shape[5] + + indices_shape[0] + indices_shape[1] + indices_shape[2] + indices_shape[3]; + + _target = compute_target(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, inplace, padding, src_qinfo, o_qinfo); + _reference = compute_reference(src_shape, updates_shape, indices_shape, out_shape, data_type, scatter_info, src_qinfo, o_qinfo); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch(tensor.data_type()) + { + case DataType::F32: + case DataType::F16: + { + std::uniform_real_distribution<float> distribution(-10.f, 10.f); + library->fill(tensor, distribution, i); + break; + } + case DataType::S32: + case DataType::S16: + case DataType::S8: + { + std::uniform_int_distribution<int32_t> distribution(-100, 100); + library->fill(tensor, distribution, i); + break; + } + case DataType::U32: + case DataType::U16: + case DataType::U8: + { + std::uniform_int_distribution<uint32_t> distribution(0, 200); + library->fill(tensor, distribution, i); + break; + } + default: + { + ARM_COMPUTE_ERROR("Unsupported data type."); + } + } + } +
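
The _hash member computed in setup() above is nothing more than the sum of all shape extents, used to decorrelate fills between test configurations: it offsets the per-tensor fill indices 0, 1 and 2 used further down. A stand-alone model of the idea, with TensorShape replaced by a plain array:

    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <numeric>

    // Sum of the extents, as in the fixture's _hash computation
    int32_t shape_hash(const std::array<int32_t, 6> &shape)
    {
        return std::accumulate(shape.begin(), shape.end(), int32_t{ 0 });
    }

    int main()
    {
        const int32_t hash = shape_hash({ 6, 5, 2, 1, 1, 1 });
        // Per-tensor fill seeds, as in fill(AccessorType(src), 0 + _hash) etc.
        std::cout << 0 + hash << ' ' << 1 + hash << ' ' << 2 + hash << '\n';
    }
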
// This is used to fill the indices tensor with S32 data type. + // Used to prevent ONLY having values that are out of bounds. + template <typename U> + void fill_indices(U &&tensor, int i, const TensorShape &shape) + { + // Use the smallest of the first three dimensions, plus an arbitrary offset (+1), as the upper bound so that some generated indices fall out of bounds + const int32_t max = std::min({shape[0], shape[1], shape[2]}) + 1; + library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(0), static_cast<int32_t>(max)); + } + + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, + const TensorShape &out_shape, DataType data_type, const ScatterInfo info, bool inplace, bool padding, + QuantizationInfo a_qinfo, QuantizationInfo o_qinfo) + { + // 1. Create relevant tensors using ScatterInfo data structure. + // ---------------------------------------------------- + // In order - src, updates, indices, output. + TensorType src = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo); + TensorType updates = create_tensor<TensorType>(shape_b, data_type, 1, a_qinfo); + TensorType indices = create_tensor<TensorType>(shape_c, DataType::S32, 1, QuantizationInfo()); + TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, o_qinfo); + + FunctionType scatter; + + // Configure operator + // When scatter_info.zero_initialization is true, pass nullptr for src + // because dst does not need to be initialized with src values. + if(info.zero_initialization) + { + scatter.configure(nullptr, &updates, &indices, &dst, info); + } + else + { + if(inplace) + { + scatter.configure(&src, &updates, &indices, &src, info); + } + else + { + scatter.configure(&src, &updates, &indices, &dst, info); + } + } + + // Assertions + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(updates.info()->is_resizable()); + ARM_COMPUTE_ASSERT(indices.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + if(padding) + { + add_padding_x({ &src, &updates, &indices}); + + if(!inplace) + { + add_padding_x({ &dst }); + } + } + + // Allocate tensors + src.allocator()->allocate(); + updates.allocator()->allocate(); + indices.allocator()->allocate(); + + if(!inplace) + { + dst.allocator()->allocate(); + } + + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!updates.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!indices.info()->is_resizable()); + + if(!inplace) + { + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + } + + // Fill the src (a), updates (b) and indices (c) tensors. + fill(AccessorType(src), 0 + _hash); + fill(AccessorType(updates), 1 + _hash); + fill_indices(AccessorType(indices), 2 + _hash, out_shape); + + scatter.run(); + + if(inplace) + { + return src; + } + else + { + return dst; + } + } + + SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &c_shape, + const TensorShape &out_shape, DataType data_type, ScatterInfo info, QuantizationInfo a_qinfo, QuantizationInfo o_qinfo) + { + // Output quantization is not currently in use - the fixture should be extended to support this. + ARM_COMPUTE_UNUSED(o_qinfo); + TensorShape src_shape = a_shape; + TensorShape updates_shape = b_shape; + TensorShape indices_shape = c_shape; + const int num_ind_dims = c_shape.num_dimensions(); + + // 1. Collapse batch index into a single dim if necessary for update tensor and indices tensor.
+ if(num_ind_dims >= 3) + { + indices_shape = indices_shape.collapsed_from(1); + updates_shape = updates_shape.collapsed_from(updates_shape.num_dimensions() - (num_ind_dims -1)); // Collapses batch dims + } + + // 2. Collapse data dims into a single dim. + // Collapse all src dims into 2 dims. First one holding data, the other being the index we iterate over. + src_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse all data dims into single dim. + src_shape = src_shape.collapsed_from(1); // Collapse all index dims into a single dim + updates_shape.collapse(updates_shape.num_dimensions() - 1); // Collapse data dims (all except last dim which is batch dim) + + // Create reference tensors + SimpleTensor<T> src{ src_shape, data_type, 1, a_qinfo }; + SimpleTensor<T> updates{updates_shape, data_type, 1, QuantizationInfo() }; + SimpleTensor<int32_t> indices{ indices_shape, DataType::S32, 1, QuantizationInfo() }; + + // Fill reference + fill(src, 0 + _hash); + fill(updates, 1 + _hash); + fill_indices(indices, 2 + _hash, out_shape); + + // Calculate individual reference using collapsed shapes + return reference::scatter_layer<T>(src, updates, indices, out_shape, info); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + int32_t _hash{}; +}; + +// This fixture will use the same shape for updates as indices. +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class ScatterValidationFixture : public ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape src_shape, TensorShape update_shape, TensorShape indices_shape, + TensorShape out_shape, DataType data_type, ScatterFunction func, bool zero_init, bool inplace, bool padding) + { + ScatterGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, update_shape, + indices_shape, out_shape, data_type, ScatterInfo(func, zero_init), inplace, padding, + QuantizationInfo(), QuantizationInfo()); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H diff --git a/tests/validation/fixtures/ScharrFixture.h b/tests/validation/fixtures/ScharrFixture.h index 204ffc6767..b54a9d29e6 100644 --- a/tests/validation/fixtures/ScharrFixture.h +++ b/tests/validation/fixtures/ScharrFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
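
The reference path above leans on TensorShape::collapsed_from(n) and TensorShape::collapse(n), which fold runs of dimensions into a single extent so the scatter reference only ever sees a flat (data, index) problem. A minimal stand-in for collapsed_from over a plain vector of extents; illustrative only, TensorShape carries the real semantics:

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Fold every dimension from position n upwards into one extent
    std::vector<int> collapsed_from(std::vector<int> shape, std::size_t n)
    {
        if(n >= shape.size())
            return shape;
        const int folded = std::accumulate(shape.begin() + n, shape.end(), 1, std::multiplies<int>());
        shape.resize(n);
        shape.push_back(folded);
        return shape;
    }

    int main()
    {
        for(int d : collapsed_from({ 6, 5, 2, 3 }, 1))
            std::cout << d << ' '; // prints: 6 30
        std::cout << '\n';
    }
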
* * SPDX-License-Identifier: MIT * @@ -66,7 +66,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class ScharrValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, BorderMode border_mode, Format format, GradientDimension gradient_dimension) { // Generate a random constant value @@ -120,18 +119,18 @@ protected: ARM_COMPUTE_ERROR("Gradient dimension not supported"); } - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst_x.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst_y.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst_x.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst_y.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst_x.allocator()->allocate(); dst_y.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst_x.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst_y.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst_x.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst_y.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/SelectFixture.h b/tests/validation/fixtures/SelectFixture.h index 96a7c8666a..8cb6f062f9 100644 --- a/tests/validation/fixtures/SelectFixture.h +++ b/tests/validation/fixtures/SelectFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -63,7 +63,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SelectValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, bool has_same_same_rank, DataType data_type) { TensorShape condition_shape = detail::select_condition_shape(shape, has_same_same_rank); @@ -97,10 +96,10 @@ protected: FunctionType select; select.configure(&c_t, &x_t, &y_t, &dst_t); - ARM_COMPUTE_EXPECT(c_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(x_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(y_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst_t.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(c_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(x_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(y_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst_t.info()->is_resizable()); // Allocate tensors c_t.allocator()->allocate(); @@ -108,10 +107,10 @@ protected: y_t.allocator()->allocate(); dst_t.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!c_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!x_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!y_t.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst_t.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!c_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!x_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!y_t.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst_t.info()->is_resizable()); // Fill tensors fill_bool(AccessorType(c_t), 0); diff --git a/tests/validation/fixtures/SliceOperationsFixtures.h 
b/tests/validation/fixtures/SliceOperationsFixtures.h index c1e046e427..b1f91ea2e0 100644 --- a/tests/validation/fixtures/SliceOperationsFixtures.h +++ b/tests/validation/fixtures/SliceOperationsFixtures.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,7 +30,6 @@ #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" -#include "tests/RawLutAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" #include "tests/validation/Helpers.h" @@ -46,7 +45,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SliceFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, Coordinates starts, Coordinates ends, DataType data_type) { _target = compute_target(shape, starts, ends, data_type); @@ -70,15 +68,15 @@ protected: FunctionType slice; slice.configure(&src, &dst, starts, ends); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); @@ -109,7 +107,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class StridedSliceFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, Coordinates starts, Coordinates ends, BiStrides strides, int32_t begin_mask, int32_t end_mask, int32_t shrink_mask, @@ -139,15 +136,15 @@ protected: FunctionType strided_slice; strided_slice.configure(&src, &dst, starts, ends, strides, begin_mask, end_mask, shrink_mask); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h index c39ab740cd..f4bf8df9c0 100644 --- a/tests/validation/fixtures/SoftmaxLayerFixture.h +++ b/tests/validation/fixtures/SoftmaxLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SoftmaxValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis) { _quantization_info = quantization_info; @@ -91,15 +90,15 @@ protected: FunctionType smx_layer; smx_layer.configure(&src, &dst, beta, axis); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -131,7 +130,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SoftmaxValidationFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, float beta, size_t axis) { SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>::setup(shape, @@ -146,7 +144,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SoftmaxValidationQuantizedFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG> { public: - template <typename...> void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis) { SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T, IS_LOG>::setup(shape, diff --git a/tests/validation/fixtures/SpaceToBatchFixture.h b/tests/validation/fixtures/SpaceToBatchFixture.h index c4076e6ed6..964e511301 100644 --- a/tests/validation/fixtures/SpaceToBatchFixture.h +++ b/tests/validation/fixtures/SpaceToBatchFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
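
For reference, the beta and axis arguments that the Softmax fixture above forwards into configure() map onto the textbook definition softmax(x)_i = exp(beta * (x_i - max(x))) / sum_j exp(beta * (x_j - max(x))), with the maximum subtracted for numerical stability. A 1-D float sketch of that formula; the fixture's reference additionally handles arbitrary axes and the log variant:

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    std::vector<float> softmax(std::vector<float> x, float beta)
    {
        const float m = *std::max_element(x.begin(), x.end()); // stability shift
        float sum = 0.f;
        for(float &v : x)
        {
            v = std::exp(beta * (v - m));
            sum += v;
        }
        for(float &v : x)
            v /= sum; // normalise so the outputs sum to 1
        return x;
    }

    int main()
    {
        for(float v : softmax({ 1.f, 2.f, 3.f }, 1.f))
            std::cout << v << ' '; // ~0.09 0.24 0.67
        std::cout << '\n';
    }
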
* * SPDX-License-Identifier: MIT * @@ -39,7 +39,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SpaceToBatchLayerValidationGenericFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) { @@ -79,10 +78,10 @@ protected: FunctionType space_to_batch; space_to_batch.configure(&input, &block_shape, &paddings, &output); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(block_shape.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(paddings.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(block_shape.info()->is_resizable()); + ARM_COMPUTE_ASSERT(paddings.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); @@ -90,10 +89,10 @@ protected: paddings.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!block_shape.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!paddings.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!block_shape.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!paddings.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); @@ -140,7 +139,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SpaceToBatchLayerValidationFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout) { @@ -152,7 +150,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SpaceToBatchLayerValidationQuantizedFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { public: - template <typename...> void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info) { diff --git a/tests/validation/fixtures/SpaceToDepthFixture.h b/tests/validation/fixtures/SpaceToDepthFixture.h index 45ea34b0f5..2d2e9fad7d 100644 --- a/tests/validation/fixtures/SpaceToDepthFixture.h +++ b/tests/validation/fixtures/SpaceToDepthFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,6 +24,7 @@ #ifndef ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE #define ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE +#include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "tests/Globals.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" @@ -39,7 +40,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class SpaceToDepthLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape output_shape, const int block_shape, DataType data_type, DataLayout data_layout) { _target = compute_target(input_shape, output_shape, block_shape, data_type, data_layout); @@ -69,19 +69,25 @@ protected: TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout); + auto calc_out_shape = misc::shape_calculator::compute_space_to_depth_shape(input.info(), block_shape); + ARM_COMPUTE_ASSERT(output_shape[0] == calc_out_shape[0]); + ARM_COMPUTE_ASSERT(output_shape[1] == calc_out_shape[1]); + ARM_COMPUTE_ASSERT(output_shape[2] == calc_out_shape[2]); + ARM_COMPUTE_ASSERT(output_shape[3] == calc_out_shape[3]); + // Create and configure function FunctionType space_to_depth; space_to_depth.configure(&input, &output, block_shape); - ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(output.info()->is_resizable()); // Allocate tensors input.allocator()->allocate(); output.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!output.info()->is_resizable()); // Fill tensors fill(AccessorType(input), 0); diff --git a/tests/validation/fixtures/SplitFixture.h b/tests/validation/fixtures/SplitFixture.h index 03ff41e993..203925329c 100644 --- a/tests/validation/fixtures/SplitFixture.h +++ b/tests/validation/fixtures/SplitFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
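
The new assertions above cross-check the dataset's output shape against compute_space_to_depth_shape(). For a block size b the relation is: width and height divide by b, channels multiply by b * b, and the batch stays unchanged. A stand-alone check of that arithmetic, assuming a {W, H, C, N} dimension ordering:

    #include <array>
    #include <cassert>
    #include <iostream>

    // in = {W, H, C, N}; requires W and H to be multiples of b
    std::array<int, 4> space_to_depth_shape(std::array<int, 4> in, int b)
    {
        assert(in[0] % b == 0 && in[1] % b == 0);
        return { in[0] / b, in[1] / b, in[2] * b * b, in[3] };
    }

    int main()
    {
        const auto out = space_to_depth_shape({ 4, 6, 3, 2 }, 2);
        std::cout << out[0] << 'x' << out[1] << 'x' << out[2] << 'x' << out[3] << '\n'; // 2x3x12x2
    }
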
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,6 @@ #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" -#include "tests/RawLutAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" #include "tests/validation/Helpers.h" @@ -48,7 +47,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type class SplitFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, unsigned int axis, unsigned int splits, DataType data_type) { _target = compute_target(shape, axis, splits, data_type); @@ -77,7 +75,7 @@ protected: FunctionType split; split.configure(&src, dsts_ptr, axis); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t) { return t.info()->is_resizable(); @@ -91,7 +89,7 @@ protected: dsts[i].allocator()->allocate(); } - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t) { return !t.info()->is_resizable(); @@ -150,7 +148,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type class SplitShapesFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type) { _target = compute_target(shape, axis, split_shapes, data_type); @@ -186,7 +183,7 @@ protected: FunctionType split; split.configure(&src, dsts_ptr, axis); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t) { return t.info()->is_resizable(); @@ -200,7 +197,7 @@ protected: dsts[i].allocator()->allocate(); } - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t) { return !t.info()->is_resizable(); diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h index 7bf63a3ebc..7dd8fe47dc 100644 --- a/tests/validation/fixtures/StackLayerFixture.h +++ b/tests/validation/fixtures/StackLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H #include "arm_compute/core/Helpers.h" #include "arm_compute/core/TensorShape.h" @@ -52,10 +52,9 @@ template <typename TensorType, typename AbstractTensorType, typename AccessorTyp class StackLayerValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors) { - _target = compute_target(shape_src, axis, data_type, num_tensors); + _target = compute_target(shape_src, axis, data_type, num_tensors, false /* add_x_padding */); _reference = compute_reference(shape_src, axis, data_type, num_tensors); } @@ -66,7 +65,7 @@ protected: library->fill_tensor_uniform(tensor, i); } - TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors) + TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors, bool add_x_padding) { std::vector<TensorType> tensors(num_tensors); std::vector<AbstractTensorType *> src(num_tensors); @@ -76,7 +75,7 @@ protected: { tensors[i] = create_tensor<TensorType>(shape_src, data_type); src[i] = &(tensors[i]); - ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(tensors[i].info()->is_resizable()); } // Create tensors @@ -91,18 +90,28 @@ protected: // Allocate and fill the input tensors for(int i = 0; i < num_tensors; ++i) { - ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS); + if(add_x_padding) + { + add_padding_x({&tensors[i]}, DataLayout::NHWC); + } + + ARM_COMPUTE_ASSERT(tensors[i].info()->is_resizable()); tensors[i].allocator()->allocate(); - ARM_COMPUTE_EXPECT(!tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!tensors[i].info()->is_resizable()); // Fill input tensor fill(AccessorType(tensors[i]), i); } + if(add_x_padding) + { + add_padding_x({&dst}, DataLayout::NHWC); + } + // Allocate output tensor dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Compute stack function stack.run(); @@ -132,7 +141,21 @@ protected: TensorType _target{}; SimpleTensor<T> _reference{}; }; + +template <typename TensorType, typename AbstractTensorType, typename AccessorType, typename FunctionType, typename T> +class StackLayerWithPaddingValidationFixture : + public StackLayerValidationFixture<TensorType, AbstractTensorType, AccessorType, FunctionType, T> +{ +public: + using Parent = StackLayerValidationFixture<TensorType, AbstractTensorType, AccessorType, FunctionType, T>; + + void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors) + { + Parent::_target = Parent::compute_target(shape_src, axis, data_type, num_tensors, true /* add_x_padding */); + Parent::_reference = Parent::compute_reference(shape_src, axis, data_type, num_tensors); + } +}; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_STACKLAYERFIXTURE_H diff --git a/tests/validation/fixtures/TileFixture.h b/tests/validation/fixtures/TileFixture.h index 0dfcc330f3..979eee5ab1 100644 --- a/tests/validation/fixtures/TileFixture.h +++ b/tests/validation/fixtures/TileFixture.h @@ -1,5 +1,5 @@ /* - 
* Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class TileValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type, const Multiples &multiples) { _target = compute_target(shape, data_type, multiples); @@ -68,15 +67,15 @@ protected: FunctionType tile_func; tile_func.configure(&src, &dst, multiples); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); diff --git a/tests/validation/fixtures/TransposeFixture.h b/tests/validation/fixtures/TransposeFixture.h index 757e6c3c07..212c76cc9a 100644 --- a/tests/validation/fixtures/TransposeFixture.h +++ b/tests/validation/fixtures/TransposeFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE -#define ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -32,7 +32,7 @@ #include "tests/IAccessor.h" #include "tests/framework/Asserts.h" #include "tests/framework/Fixture.h" -#include "tests/validation/reference/Transpose.h" +#include "tests/validation/reference/Permute.h" namespace arm_compute { @@ -44,7 +44,6 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ class TransposeValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape shape, DataType data_type) { _target = compute_target(shape, data_type); @@ -71,15 +70,15 @@ protected: FunctionType trans_func; trans_func.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src)); @@ -98,7 +97,7 @@ protected: // Fill reference fill(src); - return reference::transpose<T>(src); + return reference::permute<T>(src, PermutationVector(1U, 0U)); } TensorType _target{}; @@ -107,4 +106,4 @@ protected: } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_TRANSPOSE_FIXTURE 
*/ +#endif // ACL_TESTS_VALIDATION_FIXTURES_TRANSPOSEFIXTURE_H diff --git a/tests/validation/fixtures/UNIT/ContextFixture.h b/tests/validation/fixtures/UNIT/ContextFixture.h new file mode 100644 index 0000000000..77cbc12320 --- /dev/null +++ b/tests/validation/fixtures/UNIT/ContextFixture.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE +#define ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE + +#include "arm_compute/Acl.hpp" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Test-case for AclDestroyContext + * + * Validate that AclDestroyContext behaves as expected when invalid inputs as context are given + * + * Test Steps: + * - Call AclDestroyContext with null context + * - Confirm that AclInvalidArgument is reported + * - Call AclDestroyContext on empty array + * - Confirm that AclInvalidArgument is reported + * - Call AclDestroyContext on an ACL object other than AclContext + * - Confirm that AclInvalidArgument is reported + * - Confirm that context is still nullptr + */ +template <AclTarget Target> +class DestroyInvalidContextFixture : public framework::Fixture +{ +public: + void setup() + { + AclContext ctx = nullptr; + std::array<char, 256> empty_array{}; + AclContext valid_ctx = nullptr; + ARM_COMPUTE_ASSERT(AclCreateContext(&valid_ctx, Target, nullptr) == AclStatus::AclSuccess); + ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyContext(reinterpret_cast<AclContext>(empty_array.data())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(ctx == nullptr); + ARM_COMPUTE_ASSERT(AclDestroyContext(valid_ctx) == AclStatus::AclSuccess); + }; +}; + +/** Test-case for AclCreateContext and AclDestroyContext + * + * Validate that AclCreateContext can create and destroy a context through the C API + * + * Test Steps: + * - Call AclCreateContext with valid target + * - Confirm that context is not nullptr and error code is AclSuccess + * - Destroy context + * - Confirm that AclSuccess is reported + */ +template <AclTarget Target> +class SimpleContextCApiFixture : public framework::Fixture +{ +public: + void setup() + { + AclContext ctx = nullptr; + ARM_COMPUTE_ASSERT(AclCreateContext(&ctx, 
Target, nullptr) == AclStatus::AclSuccess); + ARM_COMPUTE_ASSERT(ctx != nullptr); + ARM_COMPUTE_ASSERT(AclDestroyContext(ctx) == AclStatus::AclSuccess); + }; +}; + +/** Test-case for Context from the C++ interface + * + * Test Steps: + * - Create a Context object + * - Confirm that StatusCode::Success is reported + * - Confirm that equality operator works + * - Confirm that inequality operator works + */ +template <acl::Target Target> +class SimpleContextCppApiFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode status = acl::StatusCode::Success; + acl::Context ctx(Target, &status); + ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success); + + auto ctx_eq = ctx; + ARM_COMPUTE_ASSERT(ctx_eq == ctx); + + acl::Context ctx_ineq(Target, &status); + ARM_COMPUTE_ASSERT(status == acl::StatusCode::Success); + ARM_COMPUTE_ASSERT(ctx_ineq != ctx); + }; +}; + +/** Test-case for multiple contexts + * + * Validate that AclCreateContext can create/destroy multiple contexts with different options + * + * Test Steps: + * - Call AclCreateContext with different targets + * - Confirm that AclSuccess is reported + * - Destroy all contexts + * - Confirm that AclSuccess is reported + */ +template <AclTarget Target> +class MultipleContextsFixture : public framework::Fixture +{ +public: + void setup() + { + const unsigned int num_tests = 5; + std::array<AclContext, num_tests> ctxs{}; + for(unsigned int i = 0; i < num_tests; ++i) + { + ARM_COMPUTE_ASSERT(AclCreateContext(&ctxs[i], Target, nullptr) == AclStatus::AclSuccess); + ARM_COMPUTE_ASSERT(ctxs[i] != nullptr); + ARM_COMPUTE_ASSERT(AclDestroyContext(ctxs[i]) == AclStatus::AclSuccess); + } + }; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_UNIT_CONTEXT_FIXTURE */ diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h index c3aa63b31b..3e96dcbf2d 100644 --- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h +++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited.
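For reference, the create/destroy round trip that SimpleContextCApiFixture and MultipleContextsFixture exercise looks roughly as follows when driven directly, outside the test framework. This is a minimal sketch, not library documentation: it assumes the CPU target enumerator (AclCpu), default options via a null options pointer, and the public "arm_compute/Acl.hpp" header used by the fixtures; error handling is reduced to asserts.

#include "arm_compute/Acl.hpp"
#include <cassert>

int main()
{
    // Create a context against the CPU backend with default options.
    AclContext ctx = nullptr;
    assert(AclCreateContext(&ctx, AclTarget::AclCpu, nullptr) == AclStatus::AclSuccess);
    assert(ctx != nullptr);

    // ... queues and tensors would be created against ctx here ...

    // Contexts created through the C API must be destroyed explicitly.
    assert(AclDestroyContext(ctx) == AclStatus::AclSuccess);
    return 0;
}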
* * SPDX-License-Identifier: MIT * @@ -74,14 +74,14 @@ public: void validate(bool validate_finalized) const { - ARM_COMPUTE_EXPECT(mm->pool_manager() != nullptr, framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mm->lifetime_manager() != nullptr, framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->pool_manager() != nullptr); + ARM_COMPUTE_ASSERT(mm->lifetime_manager() != nullptr); if(validate_finalized) { - ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized()); } - ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == num_pools, framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == num_pools); } AllocatorType allocator; @@ -127,7 +127,6 @@ class DynamicTensorType3SingleFunction : public framework::Fixture using T = float; public: - template <typename...> void setup(TensorShape input_level0, TensorShape input_level1) { input_l0 = input_level0; @@ -159,15 +158,15 @@ protected: SimpleFunctionWrapperType layer(serv_internal.mm); layer.configure(&src, &dst); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Populate and validate memory manager serv_cross.populate(num_pools); @@ -251,7 +250,6 @@ class DynamicTensorType3ComplexFunction : public framework::Fixture using T = float; public: - template <typename...> void setup(std::vector<TensorShape> input_shapes, TensorShape weights_shape, TensorShape bias_shape, std::vector<TensorShape> output_shapes, PadStrideInfo info) { num_iterations = input_shapes.size(); @@ -313,8 +311,8 @@ protected: // Create and configure function _f_target->configure(&src, &_weights_target, &_bias_target, &dst, info, weights_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); @@ -322,8 +320,8 @@ protected: _weights_target.allocator()->allocate(); _bias_target.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); @@ -390,7 +388,6 @@ class DynamicTensorType2PipelineFunction : public framework::Fixture using T = float; public: - template <typename...> void setup(std::vector<TensorShape> input_shapes) { _data_type = DataType::F32; diff --git a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h index 14f22a8d21..3bc4844bd5 100644 --- a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h +++ b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 Arm Limited. 
+ * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -100,8 +100,8 @@ protected: // Finalize memory manager mm->populate(_allocator, 1 /* num_pools */); - ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized()); + ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1); // Fill tensors fill(AccessorType(src), 0); @@ -206,8 +206,8 @@ protected: // Finalize memory manager mm->populate(_allocator, 1 /* num_pools */); - ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized()); + ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1); // Fill tensors (1st iteration) fill(AccessorType(src), 0); @@ -340,8 +340,8 @@ protected: // Finalize memory manager mm->populate(_allocator, 1 /* num_pools */); - ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == 1, framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(mm->lifetime_manager()->are_all_finalized()); + ARM_COMPUTE_ASSERT(mm->pool_manager()->num_pools() == 1); // Fill tensors (1st iteration) fill(AccessorType(src), 0); diff --git a/tests/validation/fixtures/UNIT/QueueFixture.h b/tests/validation/fixtures/UNIT/QueueFixture.h new file mode 100644 index 0000000000..bc93f5f120 --- /dev/null +++ b/tests/validation/fixtures/UNIT/QueueFixture.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
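The finalization contract the memory-manager hunks above assert (populate, then all lifetimes finalized and exactly one pool created) mirrors what a library user sets up by hand. A condensed sketch follows; the MemoryManagerOnDemand/BlobLifetimeManager/PoolManager wiring is the usual CPU runtime arrangement and is an assumption here, not something shown in these fixtures.

#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
#include <cassert>
#include <memory>

int main()
{
    using namespace arm_compute;
    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
    auto pool_mgr     = std::make_shared<PoolManager>();
    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

    // ... functions sharing `mm` would be configured here, as the fixtures do ...

    // Finalize: back the registered lifetimes with a single memory pool.
    Allocator allocator{};
    mm->populate(allocator, 1 /* num_pools */);
    assert(mm->lifetime_manager()->are_all_finalized());
    assert(mm->pool_manager()->num_pools() == 1);
    return 0;
}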
+ */ +#ifndef ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE +#define ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE + +#include "arm_compute/Acl.hpp" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Test case for AclCreateQueue + * + * Validate that AclCreateQueue behaves as expected with invalid context + * + * Test Steps: + * - Call AclCreateQueue with an invalid context + * - Confirm that AclInvalidArgument is reported + * - Confirm that the queue is still nullptr + */ +class CreateQueueWithInvalidContextFixture : public framework::Fixture +{ +public: + void setup() + { + AclQueue queue = nullptr; + ARM_COMPUTE_ASSERT(AclCreateQueue(&queue, nullptr, nullptr) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(queue == nullptr); + }; +}; + +/** Test-case for AclCreateQueue + * + * Validate that AclCreateQueue behaves as expected with invalid options + * + * Test Steps: + * - Call AclCreateQueue with valid context but invalid options + * - Confirm that AclInvalidArgument is reported + * - Confirm that queue is still nullptr + */ +template <acl::Target Target> +class CreateQueuerWithInvalidOptionsFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + + // Check invalid tuning mode + AclQueueOptions invalid_queue_opts; + invalid_queue_opts.mode = static_cast<AclTuningMode>(-1); + + AclQueue queue = nullptr; + ARM_COMPUTE_ASSERT(AclCreateQueue(&queue, ctx.get(), &invalid_queue_opts) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(queue == nullptr); + }; +}; + +/** Test case for AclDestroyQueue +* +* Validate that AclDestroyQueue behaves as expected when an invalid queue is given +* +* Test Steps: +* - Call AclDestroyQueue with null queue +* - Confirm that AclInvalidArgument is reported +* - Call AclDestroyQueue on empty array +* - Confirm that AclInvalidArgument is reported +* - Call AclDestroyQueue on an ACL object other than AclQueue +* - Confirm that AclInvalidArgument is reported +* - Confirm that queue is still nullptr +*/ +template <acl::Target Target> +class DestroyInvalidQueueFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + + std::array<char, 256> empty_array{}; + AclQueue queue = nullptr; + + ARM_COMPUTE_ASSERT(AclDestroyQueue(queue) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyQueue(reinterpret_cast<AclQueue>(ctx.get())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyQueue(reinterpret_cast<AclQueue>(empty_array.data())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(queue == nullptr); + }; +}; + +/** Test case for AclCreateQueue + * + * Validate that a queue can be created successfully + * + * Test Steps: + * - Create a valid context + * - Create a valid queue + * - Confirm that AclSuccess is returned + */ +template <acl::Target Target> +class SimpleQueueFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + acl::Queue queue(ctx, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + }; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_UNIT_QUEUE_FIXTURE */ diff --git a/tests/validation/fixtures/UNIT/TensorFixture.h 
b/tests/validation/fixtures/UNIT/TensorFixture.h new file mode 100644 index 0000000000..bfe115b3ed --- /dev/null +++ b/tests/validation/fixtures/UNIT/TensorFixture.h @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE +#define ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE + +#include "arm_compute/Acl.hpp" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Test case for AclCreateTensor + * + * Validate that AclCreateTensor behaves as expected with invalid context + * + * Test Steps: + * - Call AclCreateTensor with an invalid context + * - Confirm that AclInvalidArgument is reported + * - Confirm that the tensor is still nullptr + */ +class CreateTensorWithInvalidContextFixture : public framework::Fixture +{ +public: + void setup() + { + AclTensor tensor = nullptr; + ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, nullptr, nullptr, false) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(tensor == nullptr); + }; +}; + +/** Test-case for AclCreateTensor + * + * Validate that AclCreateTensor behaves as expected on invalid descriptor + * + * Test Steps: + * - Call AclCreateTensor with valid context but invalid descriptor + * - Confirm that AclInvalidArgument is reported + * - Confirm that tensor is still nullptr + */ +template <acl::Target Target> +class CreateTensorWithInvalidDescriptorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + AclTensor tensor = nullptr; + ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), nullptr, false) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(tensor == nullptr); + + // Check invalid data type + AclTensorDescriptor invalid_desc; + invalid_desc.ndims = 4; + invalid_desc.data_type = static_cast<AclDataType>(-1); + ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(tensor == nullptr); + + // Check invalid number of dimensions + invalid_desc.data_type = AclDataType::AclFloat32; + invalid_desc.ndims = 15; + ARM_COMPUTE_ASSERT(AclCreateTensor(&tensor, ctx.get(), &invalid_desc, false) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(tensor == nullptr); + }; +}; + +/** Test case for 
AclDestroyTensor +* +* Validate that AclDestroyTensor behaves as expected when an invalid tensor is given +* +* Test Steps: +* - Call AclDestroyTensor with null tensor +* - Confirm that AclInvalidArgument is reported +* - Call AclDestroyTensor on empty array +* - Confirm that AclInvalidArgument is reported +* - Call AclDestroyTensor on an ACL object other than AclTensor +* - Confirm that AclInvalidArgument is reported +* - Confirm that tensor is still nullptr +*/ +template <acl::Target Target> +class DestroyInvalidTensorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + + std::array<char, 256> empty_array{}; + AclTensor tensor = nullptr; + + ARM_COMPUTE_ASSERT(AclDestroyTensor(tensor) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(ctx.get())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyTensor(reinterpret_cast<AclTensor>(empty_array.data())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(tensor == nullptr); + }; +}; + +/** Test case for AclCreateTensor + * + * Validate that a tensor can be created successfully + * + * Test Steps: + * - Create a valid context + * - Create a valid tensor + * - Confirm that AclSuccess is returned + */ +template <acl::Target Target> +class SimpleTensorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + acl::Context ctx(Target, &err); + + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + }; +}; + +/** Test case for AclTensor + * + * Validate that multiple tensors can be created successfully + * Stress test for possible memory leaks + * + * Test Steps: + * - Create a valid context + * - Create a lot of tensors + * - Confirm that AclSuccess is returned + */ +template <acl::Target Target> +class TensorStressFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + const unsigned int num_tensors = 1024; + for(unsigned int i = 0; i < num_tensors; ++i) + { + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 1024, 1024 }, acl::DataType::Float32), &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + } + }; +}; + +/** Test case for AclMapTensor + * + * Validate that map on an invalid object fails + * + * Test Steps: + * - Create a valid context + * - Pass an invalid object for mapping + * - Confirm that AclInvalidArgument is returned + */ +template <acl::Target Target> +class MapInvalidTensorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + void *handle = nullptr; + ARM_COMPUTE_ASSERT(AclMapTensor(reinterpret_cast<AclTensor>(ctx.get()), &handle) == AclStatus::AclInvalidArgument); + }; +}; + +/** Test case for AclMapTensor + * + * Validate that map of an unallocated tensor returns nullptr + * + * Test Steps: + * - Create a valid context + * - Create a valid tensor without allocating + * - Map tensor + * - Check that mapping is nullptr + */ +template <acl::Target Target> +class MapNotAllocatedTensorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; +
+ acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), false /* allocate */, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + ARM_COMPUTE_ASSERT(tensor.map() == nullptr); + }; +}; + +/** Test case for AclMapTensor + * + * Validate that map of a valid tensor returns a non-nullptr value + * + * Test Steps: + * - Create a valid context + * - Create a valid tensor while allocating + * - Map tensor + * - Check that mapping is not nullptr + */ +template <acl::Target Target> +class MapAllocatedTensorFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 8, 8 }, acl::DataType::Float32), &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + void *handle = tensor.map(); + ARM_COMPUTE_ASSERT(handle != nullptr); + ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success); + }; +}; + +/** Test case for AclTensorImport + * + * Validate that external memory can be successfully imported + * + * Test Steps: + * - Create a valid context + * - Create a valid tensor without allocating + * - Allocate external memory + * - Import memory to the tensor + * - Check that imported pointer matches + */ +template <acl::Target Target> +class ImportMemoryFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + const int32_t size = 8; + acl::Tensor tensor(ctx, acl::TensorDescriptor({ size }, acl::DataType::Float32), false /* allocate */, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + std::vector<float> data(size); + err = tensor.import(data.data(), acl::ImportType::Host); + + void *handle = tensor.map(); + ARM_COMPUTE_ASSERT(handle == data.data()); + ARM_COMPUTE_ASSERT(tensor.unmap(handle) == acl::StatusCode::Success); + } +}; +/** Test case for get_size() interface of Tensor + * + * Test Steps: + * - Create a valid context + * - Create a valid tensor + * - Compare the size value returned with the expected value + */ +template <acl::Target Target> +class TensorSizeFixture : public framework::Fixture +{ +public: + void setup() + { + acl::StatusCode err = acl::StatusCode::Success; + acl::Context ctx(Target, &err); + + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err); + + // size should be 6 elements (2x3) times 4 bytes (float32) = 24 bytes + constexpr size_t expected_size = 24; + ARM_COMPUTE_ASSERT(tensor.get_size() == expected_size); + }; +}; +/** Test case for get_size() dealing with invalid arguments + * + * Test Steps: + * - Check that a null tensor returns the correct error + * - Create a valid tensor + * - Check that the C interface returns the correct error for a null size argument + */ +template <acl::Target Target> +class InvalidTensorSizeFixture : public framework::Fixture +{ +public: + void setup() + { + // Null tensor + AclTensor null_tensor = nullptr; + uint64_t size{ 0 }; + ARM_COMPUTE_ASSERT(AclGetTensorSize(null_tensor, &size) == AclStatus::AclInvalidArgument); + + // Create valid tensor + acl::StatusCode err = acl::StatusCode::Success; + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err
== acl::StatusCode::Success); + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err); + + // Null size argument + ARM_COMPUTE_ASSERT(AclGetTensorSize(tensor.get(), nullptr) == AclStatus::AclInvalidArgument); + }; +}; + +template <acl::Target Target> +class DescriptorConversionFixture : public framework::Fixture +{ + bool compare_descriptor(const AclTensorDescriptor &desc_a, const AclTensorDescriptor &desc_b) + { + auto are_descriptors_same = true; + + are_descriptors_same &= desc_a.ndims == desc_b.ndims; + are_descriptors_same &= desc_a.data_type == desc_b.data_type; + are_descriptors_same &= desc_a.shape != nullptr && desc_b.shape != nullptr; + + for(int32_t d = 0; d < desc_a.ndims; ++d) + { + are_descriptors_same &= desc_a.shape[d] == desc_b.shape[d]; + } + + // other attributes should be added here + + return are_descriptors_same; + } + +public: + void setup() + { + auto err{ acl::StatusCode::Success }; + auto ctx{ acl::Context(Target, &err) }; + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + auto desc{ acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32) }; + acl::Tensor tensor(ctx, desc, &err); + + auto desc_from_tensor = tensor.get_descriptor(); + + ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), *desc_from_tensor.get())); + ARM_COMPUTE_ASSERT(desc == desc_from_tensor); + + // Test the C interface with a "prepopulated" descriptor + // Note: When the C interface is used, there is a possibility of memory leaks + // if members (e.g., shape) are not correctly deleted. + // Since that is considered the user's responsibility, we don't test it here. + AclTensorDescriptor prepopulated_descriptor + { + 3, nullptr, AclDataType::AclBFloat16, nullptr, 0 + }; + + ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), &prepopulated_descriptor) == AclStatus::AclSuccess); + ARM_COMPUTE_ASSERT(compare_descriptor(*desc.get(), prepopulated_descriptor)); + ARM_COMPUTE_ASSERT(desc == acl::TensorDescriptor(prepopulated_descriptor)); + }; +}; + +template <acl::Target Target> +class InvalidDescriptorConversionFixture : public framework::Fixture +{ +public: + void setup() + { + // Null tensor + AclTensor null_tensor = nullptr; + AclTensorDescriptor desc{}; + ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(null_tensor, &desc) == AclStatus::AclInvalidArgument); + + // Create valid tensor + acl::StatusCode err = acl::StatusCode::Success; + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + acl::Tensor tensor(ctx, acl::TensorDescriptor({ 2, 3 }, acl::DataType::Float32), &err); + + // Null descriptor argument + ARM_COMPUTE_ASSERT(AclGetTensorDescriptor(tensor.get(), nullptr) == AclStatus::AclInvalidArgument); + }; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_UNIT_TENSOR_FIXTURE */ diff --git a/tests/validation/fixtures/UNIT/TensorPackFixture.h b/tests/validation/fixtures/UNIT/TensorPackFixture.h new file mode 100644 index 0000000000..bc14631936 --- /dev/null +++ b/tests/validation/fixtures/UNIT/TensorPackFixture.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2021 Arm Limited.
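The expected_size constant in TensorSizeFixture is just shape volume times element size. The same arithmetic, spelled out as a small standalone helper (a sketch for illustration, not part of the library):

#include <cstddef>
#include <cstdint>
#include <vector>

// Byte size of a dense tensor: product of all dimensions times bytes per element.
std::size_t dense_tensor_bytes(const std::vector<int32_t> &shape, std::size_t bytes_per_element)
{
    std::size_t elements = 1;
    for(int32_t dim : shape)
    {
        elements *= static_cast<std::size_t>(dim);
    }
    return elements * bytes_per_element;
}

// dense_tensor_bytes({ 2, 3 }, sizeof(float)) == 24, the value the fixture asserts.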
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE +#define ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE + +#include "arm_compute/Acl.hpp" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Test case for AclCreateTensorPack + * + * Validate that AclCreateTensorPack behaves as expected with invalid context + * + * Test Steps: + * - Call AclCreateTensorPack with an invalid context + * - Confirm that AclInvalidArgument is reported + * - Confirm that the tensor pack is still nullptr + */ +class CreateTensorPackWithInvalidContextFixture : public framework::Fixture +{ +public: + void setup() + { + AclTensorPack pack = nullptr; + ARM_COMPUTE_ASSERT(AclCreateTensorPack(&pack, nullptr) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(pack == nullptr); + }; +}; + +/** Test case for AclDestroyTensorPack + * + * Validate that AclDestroyTensorPack behaves as expected when an invalid tensor pack is given + * + * Test Steps: + * - Call AclDestroyTensorPack with null tensor pack + * - Confirm that AclInvalidArgument is reported + * - Call AclDestroyTensorPack on empty array + * - Confirm that AclInvalidArgument is reported + * - Call AclDestroyTensorPack on an ACL object other than AclTensorPack + * - Confirm that AclInvalidArgument is reported + * - Confirm that tensor pack is still nullptr + */ +template <acl::Target Target> +class DestroyInvalidTensorPackFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + + std::array<char, 256> empty_array{}; + AclTensorPack pack = nullptr; + + ARM_COMPUTE_ASSERT(AclDestroyTensorPack(pack) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(ctx.get())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(AclDestroyTensorPack(reinterpret_cast<AclTensorPack>(empty_array.data())) == AclStatus::AclInvalidArgument); + ARM_COMPUTE_ASSERT(pack == nullptr); + }; +}; + +/** Test case for AclPackTensor + * + * Validate that AclPackTensor behaves as expected when an invalid object is being passed for packing + * + * Test Steps: + * - Create a valid TensorPack + * - Try to pack an empty object + * - Confirm that AclInvalidArgument is reported + * - Try to pack another API object other than 
tensor + * - Confirm that AclInvalidArgument is reported + */ +template <acl::Target Target> +class AddInvalidObjectToTensorPackFixture : public framework::Fixture +{ +public: + void setup() + { + auto err = acl::StatusCode::Success; + + acl::Context ctx(Target, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + acl::TensorPack pack(ctx, &err); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + + auto status = AclPackTensor(pack.get(), + reinterpret_cast<AclTensor>(ctx.get()), + AclTensorSlot::AclSrc); + ARM_COMPUTE_ASSERT(status == AclInvalidArgument); + + status = AclPackTensor(pack.get(), nullptr, AclTensorSlot::AclSrc); + ARM_COMPUTE_ASSERT(status == AclInvalidArgument); + }; +}; + +/** Test case for AclPackTensor + * + * Validate that a tensor can be added successfully to the TensorPack + * + * Test Steps: + * - Create a valid tensor pack + * - Create a valid tensor + * - Add tensor to the tensor pack + * - Confirm that AclSuccess is returned + */ +template <acl::Target Target> +class SimpleTensorPackFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + acl::TensorPack pack(ctx); + acl::Tensor t(ctx, acl::TensorDescriptor({ 3, 3, 5, 7 }, acl::DataType::Float32)); + + ARM_COMPUTE_ASSERT(pack.add(t, AclTensorSlot::AclSrc) == acl::StatusCode::Success); + }; +}; + +/** Test case for AclPackTensor + * + * Validate that multiple tensors can be added successfully to the TensorPack + * + * Test Steps: + * - Create a valid tensor pack + * - Create a list of valid tensors + * - Add tensors to the tensor pack + * - Confirm that AclSuccess is returned + */ +template <acl::Target Target> +class MultipleTensorsInPackFixture : public framework::Fixture +{ +public: + void setup() + { + acl::Context ctx(Target); + acl::TensorPack pack(ctx); + + const acl::TensorDescriptor desc(acl::TensorDescriptor({ 3, 3, 5, 7 }, acl::DataType::Float32)); + const size_t num_tensors = 256; + + std::vector<acl::Tensor> tensors; + for(unsigned int i = 0; i < num_tensors; ++i) + { + auto err = acl::StatusCode::Success; + tensors.emplace_back(acl::Tensor(ctx, desc, &err)); + ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success); + ARM_COMPUTE_ASSERT(pack.add(tensors.back(), static_cast<int32_t>(AclTensorSlot::AclSrcVec) + i) == acl::StatusCode::Success); + } + }; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_UNIT_TENSORPACK_FIXTURE */ diff --git a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h index af9f776ebc..f5e6071340 100644 --- a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h +++ b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h @@ -74,10 +74,10 @@ protected: TensorType compute_target() { // Create tensors - TensorType w1 = create_tensor<TensorType>(TensorShape(180000U, 150U), DataType::F32, 1); - TensorType b1 = create_tensor<TensorType>(TensorShape(150U), DataType::F32, 1); - TensorType src = create_tensor<TensorType>(TensorShape(1U, 150U, 1200U, _max_batches), DataType::F32, 1); - TensorType dst = create_tensor<TensorType>(TensorShape(150U, _max_batches), DataType::F32, 1); + TensorType w1 = create_tensor<TensorType>(TensorShape(6000U, 15U), DataType::F32, 1); + TensorType b1 = create_tensor<TensorType>(TensorShape(15U), DataType::F32, 1); + TensorType src = create_tensor<TensorType>(TensorShape(1U, 15U, 400U, _max_batches), DataType::F32, 1); + TensorType dst = create_tensor<TensorType>(TensorShape(15U, 
_max_batches), DataType::F32, 1); // Create and configure function FullyConnectedFunction fc_layer_1; @@ -105,9 +105,9 @@ protected: int diff = _max_batches - _cur_batches; auto new_src_padding = PaddingSize(src_padding.top, src_padding.right, src_padding.bottom + diff, src_padding.left); auto new_dst_padding = PaddingSize(dst_padding.top, dst_padding.right, dst_padding.bottom + diff, dst_padding.left); - src.allocator()->info().set_tensor_shape(TensorShape(1U, 150U, 1200U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); + src.allocator()->info().set_tensor_shape(TensorShape(1U, 15U, 400U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); src.allocator()->info().set_is_resizable(false); - dst.allocator()->info().set_tensor_shape(TensorShape(150U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); + dst.allocator()->info().set_tensor_shape(TensorShape(15U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); dst.allocator()->info().set_is_resizable(false); // Configure FC info @@ -129,16 +129,16 @@ protected: SimpleTensor<T> compute_reference() { // Create reference - SimpleTensor<T> w1{ TensorShape(180000U, 150U), DataType::F32 }; - SimpleTensor<T> b1{ TensorShape(150U), DataType::F32 }; - SimpleTensor<T> src{ TensorShape(1U, 150U, 1200U, _cur_batches), DataType::F32 }; + SimpleTensor<T> w1{ TensorShape(6000U, 15U), DataType::F32 }; + SimpleTensor<T> b1{ TensorShape(15U), DataType::F32 }; + SimpleTensor<T> src{ TensorShape(1U, 15U, 400U, _cur_batches), DataType::F32 }; // Fill reference fill(src, 5); fill(w1, 1); fill(b1, 2); - return reference::fully_connected_layer(src, w1, b1, TensorShape(150U, _cur_batches)); + return reference::fully_connected_layer(src, w1, b1, TensorShape(15U, _cur_batches)); } protected: diff --git a/tests/validation/fixtures/UnstackFixture.h b/tests/validation/fixtures/UnstackFixture.h index 53c79e180b..30b7dd5539 100644 --- a/tests/validation/fixtures/UnstackFixture.h +++ b/tests/validation/fixtures/UnstackFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,7 +47,6 @@ template <typename TensorType, typename ITensorType, typename AccessorType, type class UnstackValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, int axis, int num, DataType data_type) { _target = compute_target(input_shape, axis, num, data_type); @@ -80,10 +79,10 @@ protected: for(auto &out : output_slices) { out.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!out.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!out.info()->is_resizable()); } input_tensor.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!input_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!input_tensor.info()->is_resizable()); fill(AccessorType(input_tensor), 0); // Compute function unstack.run(); diff --git a/tests/validation/fixtures/WeightsReshapeFixture.h b/tests/validation/fixtures/WeightsReshapeFixture.h index 5c17b538d5..68bd8b689d 100644 --- a/tests/validation/fixtures/WeightsReshapeFixture.h +++ b/tests/validation/fixtures/WeightsReshapeFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
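The batch-shrinking step in WeightsRetentionFixture (shrink the visible shape, then compensate with bottom padding so the underlying allocation is untouched) generalizes to the helper below. This is a sketch under the fixture's own assumptions, namely that the batch is the tensor's last dimension and the tensor was allocated at max_batches; the helper name is illustrative.

template <typename TensorType>
void shrink_visible_batch(TensorType &tensor, unsigned int max_batches, unsigned int cur_batches)
{
    const PaddingSize pad = tensor.allocator()->info().padding();
    const int diff        = static_cast<int>(max_batches) - static_cast<int>(cur_batches);

    // Grow the bottom padding by exactly the rows the shape gives up.
    const PaddingSize new_pad(pad.top, pad.right, pad.bottom + diff, pad.left);

    TensorShape shape = tensor.info()->tensor_shape();
    shape.set(shape.num_dimensions() - 1, cur_batches);

    tensor.allocator()->info().set_tensor_shape(shape).set_is_resizable(true).extend_padding(new_pad);
    tensor.allocator()->info().set_is_resizable(false);
}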
* * SPDX-License-Identifier: MIT * @@ -45,10 +45,9 @@ namespace validation using namespace arm_compute::misc::shape_calculator; template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class WeightsReshapeValidationFixture : public framework::Fixture +class WeightsReshapeOpValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, DataType data_type, bool has_bias, unsigned int num_groups) { const TensorShape output_shape = compute_weights_reshaped_shape(TensorInfo(input_shape, 1, data_type), has_bias, num_groups); @@ -73,34 +72,44 @@ protected: // Create and configure function FunctionType weights_reshape_func; - weights_reshape_func.configure(&src, (has_bias ? &bias : nullptr), &dst, num_groups); + weights_reshape_func.configure(src.info(), (has_bias ? bias.info() : nullptr), dst.info(), num_groups); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0); if(has_bias) { - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); bias.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); fill(AccessorType(bias), 1); } + arm_compute::ITensorPack pack = + { + { arm_compute::TensorType::ACL_SRC, &src }, + { arm_compute::TensorType::ACL_DST, &dst } + }; + + if(has_bias) + { + pack.add_const_tensor(arm_compute::TensorType::ACL_BIAS, &bias); + } // Compute function - weights_reshape_func.run(); + weights_reshape_func.run(pack); return dst; } diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index 03ec920c4e..20b678b36c 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023-2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
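The WeightsReshape change above moves the fixture from the function-style API (configure on tensors, then run()) to the operator-style API (configure on ITensorInfo, then run(ITensorPack)). Reduced to its core, the calling convention looks as follows; the operator type is left generic and the helper name is illustrative, while the slot enums and pack construction match the fixture.

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/experimental/Types.h"

template <typename OperatorType>
void run_weights_reshape(OperatorType &op, arm_compute::ITensor &src, arm_compute::ITensor *bias,
                         arm_compute::ITensor &dst, unsigned int num_groups)
{
    // Configure against metadata only; no buffers are touched at this point.
    op.configure(src.info(), (bias != nullptr) ? bias->info() : nullptr, dst.info(), num_groups);

    // Bind the actual buffers to their slots at run time.
    arm_compute::ITensorPack pack =
    {
        { arm_compute::TensorType::ACL_SRC, &src },
        { arm_compute::TensorType::ACL_DST, &dst }
    };
    if(bias != nullptr)
    {
        pack.add_const_tensor(arm_compute::TensorType::ACL_BIAS, bias);
    }
    op.run(pack);
}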
*/ -#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE -#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE +#ifndef ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" @@ -51,130 +51,36 @@ namespace validation { using namespace arm_compute::misc::shape_calculator; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true> -class WinogradConvolutionLayerValidationFixture : public framework::Fixture +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false> +class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, ActivationLayerInfo act_info) + DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout) + { ARM_COMPUTE_UNUSED(dilation); - - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); + _mixed_layout = mixed_layout; + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); } protected: - template <typename U> - void fill(U &&tensor, int i, float min, float max) - { - switch(tensor.data_type()) - { - case DataType::F16: - { - arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) }; - library->fill(tensor, distribution, i); - break; - } - case DataType::F32: - { - std::uniform_real_distribution<float> distribution(min, max); - library->fill(tensor, distribution, i); - break; - } - default: - { - ARM_COMPUTE_ERROR("Not supported"); - } - } - } - - TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info, - DataType data_type, ActivationLayerInfo act_info) - { - // Create tensors - TensorType src = create_tensor<TensorType>(input_shape, data_type, 1); - TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1); - TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1); - TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1); - - // Create and configure function - FunctionType conv; - ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS); - conv.configure(&src, &weights, (use_bias) ? 
&bias : nullptr, &dst, info, act_info); - - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Allocate tensors - src.allocator()->allocate(); - weights.allocator()->allocate(); - dst.allocator()->allocate(); - bias.allocator()->allocate(); - - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); - - // Fill tensors - fill(AccessorType(src), 0, -1.f, 1.f); - fill(AccessorType(weights), 1, -1.f, 1.f); - fill(AccessorType(bias), 2, -1.f, 1.f); - - // Compute Winograd Convolution function - conv.run(); - - return dst; - } - - SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, - DataType data_type, ActivationLayerInfo act_info) + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { - // Create reference - SimpleTensor<T> src{ input_shape, data_type, 1 }; - SimpleTensor<T> weights{ weights_shape, data_type, 1 }; - SimpleTensor<T> bias{ bias_shape, data_type, 1 }; + const DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); - // Fill reference - fill(src, 0, -1.f, 1.f); - fill(weights, 1, -1.f, 1.f); - if(use_bias) - { - fill(bias, 2, -1.f, 1.f); - } - else - { - fill(bias, 2, 0.f, 0.f); - } - - SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info); + // Compute Convolution function + layer.run(); - return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out; + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); } - TensorType _target{}; - SimpleTensor<T> _reference{}; -}; - -template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true> -class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture -{ -public: - template <typename...> - void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout) - - { - ARM_COMPUTE_UNUSED(dilation); - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); - _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); - } - -protected: template <typename U> void fill(U &&tensor, int i, float min, float max) { @@ -221,10 +127,12 @@ protected: framework::LogLevel::ERRORS); conv.configure(&src, &weights, (use_bias) ? 
&bias : nullptr, &dst, info, act_info, true /* Enable fast math */); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &weights, &bias, &dst }, data_layout); // Allocate tensors src.allocator()->allocate(); @@ -232,19 +140,25 @@ protected: dst.allocator()->allocate(); bias.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, -0.5f, 0.5f); fill(AccessorType(weights), 1, -0.5f, 0.5f); fill(AccessorType(bias), 2, -0.5f, 0.5f); - // Compute Winograd Convolution function - conv.run(); - + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute function + conv.run(); + } return dst; } @@ -315,28 +229,45 @@ protected: SimpleTensor<T1> filter_transform_out = reference::winograd_filter_transform<T1>(weights_t1, filter_transform_shape, winograd_info); SimpleTensor<T1> batched_gemm = reference::gemm<T1>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f); SimpleTensor<T1> conv_out = reference::winograd_output_transform<T1>(batched_gemm, bias_t1, output_transform_shape, winograd_info); - SimpleTensor<T> conv_out_t(std::move(copy_tensor<T, T1>(conv_out))); + SimpleTensor<T> conv_out_t(copy_tensor<T, T1>(conv_out)); return (act_info.enabled()) ? 
reference::activation_layer<T>(conv_out_t, act_info) : conv_out_t; } TensorType _target{}; SimpleTensor<T> _reference{}; + bool _mixed_layout{ false }; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class WinogradInputTransformValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type) { TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); - - _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); + _mixed_layout = mixed_layout; + _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout_src = src.info()->data_layout(); + const DataLayout data_layout_dst = dst.info()->data_layout(); + + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout_src); + dst.info()->set_data_layout(data_layout_dst); + } + template <typename U> void fill(U &&tensor, int i, float min, float max) { @@ -375,22 +306,30 @@ protected: FunctionType transf; transf.configure(&src, &dst, winograd_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &dst }, data_layout); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, -1.f, 1.f); - // Compute Winograd input transform function - transf.run(); - + if(_mixed_layout) + { + mix_layout(transf, src, dst); + } + else + { + // Compute Winograd input transform function + transf.run(); + } return dst; } @@ -405,25 +344,43 @@ protected: return reference::winograd_input_transform<T>(src, output_shape, winograd_info); } + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class WinogradFilterTransformValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, 
DataType data_type) { WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */); TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info); - _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); - _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); + _mixed_layout = mixed_layout; + _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type); + _reference = compute_reference(input_shape, output_shape, winograd_info, data_type); } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout_src = src.info()->data_layout(); + const DataLayout data_layout_dst = dst.info()->data_layout(); + + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout_src); + dst.info()->set_data_layout(data_layout_dst); + } + template <typename U> void fill(U &&tensor, int i, float min, float max) { @@ -463,21 +420,30 @@ protected: FunctionType filter_transform; filter_transform.configure(&src, &dst, winograd_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &dst }, data_layout); // Allocate tensors src.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, -1.f, 1.f); - filter_transform.run(); - + if(_mixed_layout) + { + mix_layout(filter_transform, src, dst); + } + else + { + // Compute Winograd filter transform function + filter_transform.run(); + } return dst; } @@ -492,15 +458,15 @@ protected: return reference::winograd_filter_transform<T>(src, output_shape, winograd_info); } + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor<T> _reference{}; }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> class WinogradOutputTransformValidationFixture : public framework::Fixture { public: - template <typename...> void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo()) { _target = compute_target(input_shape, winograd_info, data_type, act_info); @@ -508,6 +474,23 @@ public: } protected: + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout_src = src.info()->data_layout(); + const DataLayout data_layout_dst = dst.info()->data_layout(); + + 
// Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout_src); + dst.info()->set_data_layout(data_layout_dst); + } + template <typename U> void fill(U &&tensor, int i, float min, float max) { @@ -545,25 +528,34 @@ protected: FunctionType output_transform; output_transform.configure(&src, &bias, &dst, winograd_info, act_info); - ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + + add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout); // Allocate tensors src.allocator()->allocate(); bias.allocator()->allocate(); dst.allocator()->allocate(); - ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_ASSERT(!src.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // Fill tensors fill(AccessorType(src), 0, -1.f, 1.f); fill(AccessorType(bias), 1, -1.f, 1.f); - output_transform.run(); - + if(_mixed_layout) + { + mix_layout(output_transform, src, dst); + } + else + { + // Compute Winograd output transform function + output_transform.run(); + } return dst; } @@ -585,10 +577,11 @@ protected: return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output; } + bool _mixed_layout{ false }; TensorType _target{}; SimpleTensor<T> _reference{}; }; } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */ +#endif // ACL_TESTS_VALIDATION_FIXTURES_WINOGRADCONVOLUTIONLAYERFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h new file mode 100644 index 0000000000..ca4de11a15 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/reference/DepthwiseConvolutionLayer.h" +#include "tests/validation/Validation.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuDepthwiseConv2dValidationGenericFixture : public framework::Fixture +{ +public: + using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value || + std::is_same<typename std::decay<T>::type, int8_t>::value, + int32_t, + T>::type; // If T: uint8_t or int8_t then TBias: int32_t, otherwise TBias: T + + void setup(TensorShape input_shape, + Size2D kernel_size, + const PadStrideInfo &pad_stride, + const Size2D &dilation, + const unsigned int depth_multiplier, + const DataType data_type, + const DataLayout data_layout) + { + ARM_COMPUTE_ERROR_ON(data_layout != + DataLayout::NHWC); // Dynamic fusion depthwise conv2d only supports NHWC layout + + DepthwiseConv2dAttributes dwc_conv2d_attr; + const Padding2D padding_2d(pad_stride.pad_left(), pad_stride.pad_right(), pad_stride.pad_top(), + pad_stride.pad_bottom()); + dwc_conv2d_attr.pad(padding_2d) + .stride(Size2D(pad_stride.stride().first, pad_stride.stride().second)) + .dilation(dilation) + .depth_multiplier(depth_multiplier) + .dimension_rounding_type(pad_stride.round()); + + // Calculate Output and Weight Shapes + TensorShape weights_shape = TensorShape(kernel_size.width, kernel_size.height); + + const TensorInfo in_info(input_shape, 1, data_type); + const TensorInfo we_info(weights_shape, 1, data_type); + + const ConvolutionInfo info{pad_stride, depth_multiplier, ActivationLayerInfo(), dilation}; + const TensorShape output_shape = + misc::shape_calculator::compute_depthwise_convolution_shape(in_info, we_info, info); + + weights_shape.set(2, output_shape.z()); + const TensorShape bias_shape = TensorShape(weights_shape[2]); + + _data_type = data_type; + _data_layout = data_layout; + _target = compute_target(input_shape, weights_shape, bias_shape, dwc_conv2d_attr); + _reference = 
compute_reference(input_shape, weights_shape, bias_shape, output_shape, dwc_conv2d_attr); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch (tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + + // Given input is in nchw format + TensorType compute_target(TensorShape input_shape, + TensorShape weights_shape, + const TensorShape &bias_shape, + const DepthwiseConv2dAttributes dwc_conv2d_attr) + { + ARM_COMPUTE_ERROR_ON(_data_layout != DataLayout::NHWC); + + // Our test shapes are assumed in NCHW data layout, thus the permutation + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *input_info = context.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout)); + ITensorInfo *weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout)); + ITensorInfo *bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout)); + ITensorInfo *dst_info = context.create_tensor_info(); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, dwc_conv2d_attr); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_input{}; + TensorType t_weight{}; + TensorType t_bias{}; + TensorType t_dst{}; + + // Initialize user tensors + t_input.allocator()->init(*input_info); + t_weight.allocator()->init(*weight_info); + t_bias.allocator()->init(*bias_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_input.allocator()->allocate(); + t_weight.allocator()->allocate(); + t_bias.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_input), 0); + fill(AccessorType(t_weight), 1); + fill(AccessorType(t_bias), 2); + + // Run runtime + runtime.run({&t_input, &t_weight, &t_bias, &t_dst}); + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, + const TensorShape &weights_shape, + const TensorShape &bias_shape, + const TensorShape &output_shape, + DepthwiseConv2dAttributes dwc_conv2d_attr) + { + // Create reference + SimpleTensor<T> src{input_shape, _data_type, 1}; + SimpleTensor<T> weight{weights_shape, _data_type, 1}; + SimpleTensor<TBias> bias{bias_shape, _data_type, 1}; + + fill(src, 0); + fill(weight, 1); + fill(bias, 2); + + auto src_nchw = src; + auto weights_nchw = weight; + auto bias_nchw = bias; + auto output_shape_nchw = output_shape; + + 
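+        // The reference kernels still consume the legacy PadStrideInfo, so the
+        // dynamic-fusion attributes are unpacked back into one. A minimal sketch of
+        // the equivalence (values are illustrative only, not part of this patch):
+        //   DepthwiseConv2dAttributes{}.pad({1, 1, 1, 1}).stride({2, 2})
+        //   ==> PadStrideInfo(2, 2, 1, 1, 1, 1, DimensionRoundingType{})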
PadStrideInfo legacy_pad_stride(dwc_conv2d_attr.stride().x(), dwc_conv2d_attr.stride().y(), + dwc_conv2d_attr.pad().left, dwc_conv2d_attr.pad().right, + dwc_conv2d_attr.pad().top, dwc_conv2d_attr.pad().bottom, + DimensionRoundingType{}); + auto dst_nchw = + reference::depthwise_convolution(src_nchw, weights_nchw, bias_nchw, output_shape_nchw, legacy_pad_stride, + dwc_conv2d_attr.depth_multiplier(), dwc_conv2d_attr.dilation()); + return dst_nchw; + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + DataType _data_type{}; + DataLayout _data_layout{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuDepthwiseConv2dValidationFixture + : public DynamicFusionGpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, + Size2D kernel_size, + const PadStrideInfo &info, + const Size2D &dilation, + const unsigned int depth_multiplier, + DataType data_type, + DataLayout data_layout) + { + DynamicFusionGpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + input_shape, kernel_size, info, dilation, depth_multiplier, data_type, data_layout); + } +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DEPTHWISECONV2DFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h new file mode 100644 index 0000000000..1f4e223b93 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/Conv2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/reference/ConvolutionLayer.h" +#include "tests/validation/reference/Permute.h" +#include "tests/validation/Validation.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +template <typename U> +void fill(U &&tensor, int i) +{ + switch (tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } +} + +} // namespace + +/** General Conv2d fixture + * Adapted from tests/validation/fixtures/ConvolutionLayerFixture.h + * TODO: Parameterize to be fully backend agnostic: COMPMID-5760; remove Gpu from name + */ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuConv2dValidationGenericFixture : public framework::Fixture +{ +public: + using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value || + std::is_same<typename std::decay<T>::type, int8_t>::value, + int32_t, + T>::type; // If T: uint8_t or int8_t then TBias: int32_t, otherwise TBias: T + + void setup(TensorShape input_shape, + TensorShape weights_shape, + TensorShape bias_shape, + TensorShape output_shape, + const PadStrideInfo &info, + const Size2D &dilation, + DataType data_type, + DataLayout data_layout, + QuantizationInfo quantization_info, + QuantizationInfo weight_quantization_info) + { + ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion conv2d only supports NHWC layout + const Conv2dAttributes conv2d_attr = convert_pad_stride_info_to_conv_attr(info, dilation); + _data_type = data_type; + _data_layout = data_layout; + _is_quantized = is_data_type_quantized_asymmetric(data_type); + _quantization_info = quantization_info; + _weight_quantization_info = weight_quantization_info; + _bias_data_type = _is_quantized ? 
DataType::S32 : data_type; + _target = compute_target(input_shape, weights_shape, bias_shape, conv2d_attr); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, conv2d_attr); + } + +protected: + // Given input is in nchw format + TensorType compute_target(TensorShape input_shape, + TensorShape weights_shape, + const TensorShape &bias_shape, + Conv2dAttributes conv2d_attr) + { + ARM_COMPUTE_ERROR_ON(_data_layout != DataLayout::NHWC); + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + CLScheduler::get().default_reinit(); + + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *input_info = context.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout)); + ITensorInfo *weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout)); + ITensorInfo *bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout)); + ITensorInfo *dst_info = context.create_tensor_info(); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, conv2d_attr); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + // Construct user tensors + TensorType t_input{}; + TensorType t_weight{}; + TensorType t_bias{}; + TensorType t_dst{}; + + // Initialize user tensors + t_input.allocator()->init(*input_info); + t_weight.allocator()->init(*weight_info); + t_bias.allocator()->init(*bias_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_input.allocator()->allocate(); + t_weight.allocator()->allocate(); + t_bias.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_input), 0); + fill(AccessorType(t_weight), 1); + fill(AccessorType(t_bias), 2); + + // Run runtime + runtime.run({&t_input, &t_weight, &t_bias, &t_dst}); + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, + const TensorShape &weights_shape, + const TensorShape &bias_shape, + const TensorShape &output_shape, + Conv2dAttributes conv2d_attr) + { + // Create reference + SimpleTensor<T> src{input_shape, _data_type, 1, _quantization_info}; + SimpleTensor<T> weight{weights_shape, _data_type, 1, _weight_quantization_info}; + SimpleTensor<TBias> bias{bias_shape, _data_type, 1, _quantization_info}; + + fill(src, 0); + fill(weight, 1); + fill(bias, 2); + + auto src_nchw = src; + auto weights_nchw = weight; + auto bias_nchw = bias; + auto output_shape_nchw = output_shape; + + PadStrideInfo legacy_pad_stride(conv2d_attr.stride().x(), conv2d_attr.stride().y(), conv2d_attr.pad().left, + conv2d_attr.pad().right, conv2d_attr.pad().top, conv2d_attr.pad().bottom, + DimensionRoundingType{}); + auto dst_nchw = reference::convolution_layer(src_nchw, weights_nchw, bias_nchw, output_shape_nchw, + legacy_pad_stride, conv2d_attr.dilation()); + 
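+        // The reference is computed in NCHW (the datasets' native layout) while
+        // compute_target() runs NHWC; the validation framework's layout-aware
+        // accessors make the element-wise comparison between the two possible.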
return dst_nchw;
+    }
+
+    TensorType _target{};
+    SimpleTensor<T> _reference{};
+    DataType _data_type{};
+    DataType _bias_data_type{};
+    DataLayout _data_layout{};
+    QuantizationInfo _quantization_info{};
+    QuantizationInfo _weight_quantization_info{};
+    bool _is_quantized = false;
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionGpuConv2dValidationFixture
+    : public DynamicFusionGpuConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    void setup(TensorShape input_shape,
+               TensorShape weights_shape,
+               TensorShape output_shape,
+               TensorShape bias_shape,
+               const PadStrideInfo &info,
+               const Size2D &dilation,
+               DataType data_type,
+               DataLayout data_layout,
+               QuantizationInfo quantization_info)
+    {
+        DynamicFusionGpuConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+            input_shape, weights_shape, output_shape, bias_shape, info, dilation, data_type, data_layout,
+            quantization_info, quantization_info);
+    }
+};
+
+/** Specific Conv2d method: Direct Conv2d fixture
+ * Adapted from tests/validation/fixtures/DirectConvolutionLayerFixture.h
+ * TODO: Parameterize to be fully backend agnostic: COMPMID-5760
+ */
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DynamicFusionDirectConv2dValidationGenericFixture : public framework::Fixture
+{
+public:
+    using TBias =
+        typename std::conditional<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T>::type;
+
+    void setup(TensorShape input_shape,
+               int stride_x,
+               int stride_y,
+               int pad_x,
+               int pad_y,
+               unsigned int kernel_size,
+               unsigned int num_kernels,
+               DataType data_type,
+               QuantizationInfo quantization_info,
+               DataLayout data_layout)
+    {
+        ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion conv2d only supports NHWC layout
+
+        TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
+        const TensorShape bias_shape(num_kernels);
+        const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
+        const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ?
DataType::S32 : data_type; + + const Conv2dAttributes conv2d_attr = convert_pad_stride_info_to_conv_attr(info, {1U, 1U} /* dilation */); + + TensorInfo input_info = TensorInfo(input_shape, 1, data_type); + TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type); + + const TensorShape output_shape = + misc::shape_calculator::compute_deep_convolution_shape(input_info, weights_info, info); + + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, conv2d_attr, data_type, + bias_data_type, quantization_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, + bias_data_type, quantization_info); + } + +protected: + TensorType compute_target(TensorShape input_shape, + TensorShape weights_shape, + const TensorShape &bias_shape, + TensorShape output_shape, + const Conv2dAttributes &conv2d_attr, + DataType data_type, + DataType bias_data_type, + QuantizationInfo quantization_info, + const DataLayout &data_layout) + { + ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); + ARM_COMPUTE_UNUSED(quantization_info); + // Dataset shapes are in NCHW layout + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(weights_shape, PermutationVector(2U, 0U, 1U)); + permute(output_shape, PermutationVector(2U, 0U, 1U)); + + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + auto input_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type, data_layout)); + auto weight_info = context.create_tensor_info(TensorInfo(weights_shape, 1, data_type, data_layout)); + auto bias_info = context.create_tensor_info(TensorInfo(bias_shape, 1, bias_data_type, data_layout)); + auto dst_info = context.create_tensor_info(); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, weight_info, bias_info, conv2d_attr); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + // Construct user tensors + TensorType t_input{}; + TensorType t_weight{}; + TensorType t_bias{}; + TensorType t_dst{}; + + // Initialize user tensors + t_input.allocator()->init(*input_info); + t_weight.allocator()->init(*weight_info); + t_bias.allocator()->init(*bias_info); + t_dst.allocator()->init(*dst_info); + + ARM_COMPUTE_ASSERT(t_input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(t_weight.info()->is_resizable()); + ARM_COMPUTE_ASSERT(t_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(t_dst.info()->is_resizable()); + + // Allocate and fill user tensors + t_input.allocator()->allocate(); + t_weight.allocator()->allocate(); + t_bias.allocator()->allocate(); + t_dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!t_input.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!t_weight.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!t_bias.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!t_dst.info()->is_resizable()); + + fill(AccessorType(t_input), 0); + fill(AccessorType(t_weight), 1); + fill(AccessorType(t_bias), 2); + + // Run runtime + runtime.run({&t_input, &t_weight, &t_bias, &t_dst}); + return 
t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &input_shape, + const TensorShape &weights_shape, + const TensorShape &bias_shape, + const TensorShape &output_shape, + const PadStrideInfo &info, + DataType data_type, + DataType bias_data_type, + QuantizationInfo quantization_info) + { + // Create reference + SimpleTensor<T> src{input_shape, data_type, 1, quantization_info}; + SimpleTensor<T> weights{weights_shape, data_type, 1, quantization_info}; + SimpleTensor<TBias> bias{bias_shape, bias_data_type, 1, quantization_info}; + + // Fill reference + fill(src, 0); + fill(weights, 1); + fill(bias, 2); + + SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info); + return dst; + } + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionDirectConv2dValidationFixture + : public DynamicFusionDirectConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, + int stride_x, + int stride_y, + int pad_x, + int pad_y, + unsigned int kernel_size, + unsigned int num_kernels, + DataType data_type, + DataLayout data_layout) + { + DynamicFusionDirectConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), + data_layout); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_DIRECTCONV2DFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h new file mode 100644 index 0000000000..69bd0efbdc --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/reference/ElementwiseOperations.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuElementwiseBinaryValidationGenericFixture : public framework::Fixture +{ +public: + void setup(ArithmeticOperation ref_op, + const TensorShape &shape0, + const TensorShape &shape1, + const TensorShape &shape2, + DataType data_type, + bool is_inplace, + bool fuse_two_ops = false) + { + _ref_op = ref_op; + _is_inplace = is_inplace; + _data_type = data_type; + _fuse = fuse_two_ops; + ARM_COMPUTE_ERROR_ON_MSG(_fuse && shape2.total_size() == 0, "No shape2 provided for fusion of two ops."); + ARM_COMPUTE_ERROR_ON_MSG(_fuse && _is_inplace, "In place for fusing case not supported yet."); + _target = compute_target(shape0, shape1, shape2); + _reference = compute_reference(shape0, shape1, shape2); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + if (is_data_type_float(tensor.data_type())) + { + switch (_ref_op) + { + case ArithmeticOperation::DIV: + library->fill_tensor_uniform_ranged(tensor, i, {std::pair<float, float>(-0.001f, 0.001f)}); + break; + case ArithmeticOperation::POWER: + library->fill_tensor_uniform(tensor, i, 0.0f, 5.0f); + break; + default: + library->fill_tensor_uniform(tensor, i); + } + } + else if (tensor.data_type() == DataType::S32) + { + switch (_ref_op) + { + case ArithmeticOperation::DIV: + library->fill_tensor_uniform_ranged(tensor, i, {std::pair<int32_t, int32_t>(-1U, 1U)}); + break; + default: + library->fill_tensor_uniform(tensor, i); + } + } + else + { + library->fill_tensor_uniform(tensor, i); + } + } + + TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2) + { + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Fuse first element wise binary Op + ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape0, 1, _data_type)); + ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape1, 1, _data_type)); + ITensorInfo *dst_info = context.create_tensor_info(); + + ITensorInfo *rhs_info_fuse = nullptr; + + ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info); + + if (_fuse) + { + rhs_info_fuse = context.create_tensor_info(TensorInfo(shape2, 1, _data_type)); + ITensorInfo *ans2_info = FunctionType::create_op(sketch, ans_info, rhs_info_fuse); + GpuOutput::create_op(sketch, ans2_info, dst_info); + } + else + { + GpuOutput::create_op(sketch, ans_info, dst_info); + } + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there 
are any
+        for (auto &data : runtime.get_auxiliary_tensors())
+        {
+            CLTensor *tensor = std::get<0>(data);
+            TensorInfo info = std::get<1>(data);
+            AuxMemoryInfo aux_mem_req = std::get<2>(data);
+            tensor->allocator()->init(info, aux_mem_req.alignment);
+            tensor->allocator()->allocate(); // Use ACL allocated memory
+        }
+
+        // Construct user tensors
+        TensorType t_lhs{};
+        TensorType t_rhs{};
+        TensorType t_rhs_fuse{};
+        TensorType t_dst{};
+
+        // Initialize user tensors
+        t_lhs.allocator()->init(*lhs_info);
+        t_rhs.allocator()->init(*rhs_info);
+        t_dst.allocator()->init(*dst_info);
+        if (_fuse)
+        {
+            t_rhs_fuse.allocator()->init(*rhs_info_fuse);
+        }
+
+        // Allocate and fill user tensors
+        // Instead of using ACL allocator, the user can choose to import memory into the tensors
+        t_lhs.allocator()->allocate();
+        t_rhs.allocator()->allocate();
+        t_dst.allocator()->allocate();
+        if (_fuse)
+        {
+            t_rhs_fuse.allocator()->allocate();
+        }
+
+        fill(AccessorType(t_lhs), 0);
+        fill(AccessorType(t_rhs), 1);
+        if (_fuse)
+        {
+            fill(AccessorType(t_rhs_fuse), 2);
+        }
+
+        // Run runtime
+        if (_fuse)
+        {
+            runtime.run({&t_lhs, &t_rhs, &t_rhs_fuse, &t_dst});
+        }
+        else
+        {
+            runtime.run({&t_lhs, &t_rhs, &t_dst});
+        }
+
+        return t_dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2)
+    {
+        const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+        const TensorShape out_shape_fuse = TensorShape::broadcast_shape(out_shape, shape2);
+
+        // Create reference
+        SimpleTensor<T> ref_lhs{shape0, _data_type, 1, QuantizationInfo()};
+        SimpleTensor<T> ref_rhs{shape1, _data_type, 1, QuantizationInfo()};
+        SimpleTensor<T> ref_rhs_fuse{shape2, _data_type, 1, QuantizationInfo()};
+        SimpleTensor<T> ref_dst{out_shape, _data_type, 1, QuantizationInfo()};
+        SimpleTensor<T> ref_dst_fuse{out_shape_fuse, _data_type, 1, QuantizationInfo()};
+
+        // Fill reference
+        fill(ref_lhs, 0);
+        fill(ref_rhs, 1);
+
+        reference::arithmetic_operation<T>(_ref_op, ref_lhs, ref_rhs, ref_dst, ConvertPolicy::WRAP);
+        if (_fuse)
+        {
+            fill(ref_rhs_fuse, 2);
+            reference::arithmetic_operation<T>(_ref_op, ref_dst, ref_rhs_fuse, ref_dst_fuse, ConvertPolicy::WRAP);
+        }
+        SimpleTensor<T> *ret = _fuse ?
&ref_dst_fuse : &ref_dst; + return *ret; + } + + ArithmeticOperation _ref_op{ArithmeticOperation::ADD}; + TensorType _target{}; + SimpleTensor<T> _reference{}; + DataType _data_type{}; + DataLayout _data_layout{}; + bool _is_inplace{false}; + bool _fuse{false}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuElementwiseBinaryOneOpValidationFixture + : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(ArithmeticOperation ref_op, const TensorShape &shape0, DataType data_type, bool is_inplace) + { + DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + ref_op, shape0, shape0, TensorShape(), data_type, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuElementwiseBinaryBroadcastOneOpValidationFixture + : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(ArithmeticOperation ref_op, + const TensorShape &shape0, + const TensorShape &shape1, + DataType data_type, + bool is_inplace) + { + DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + ref_op, shape0, shape1, TensorShape(), data_type, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuElementwiseBinaryTwoOpsValidationFixture + : public DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(ArithmeticOperation ref_op, + const TensorShape &shape0, + const TensorShape &shape1, + const TensorShape &shape2, + DataType data_type, + bool is_inplace, + bool fuse_two_ops) + { + DynamicFusionGpuElementwiseBinaryValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + ref_op, shape0, shape1, shape2, data_type, is_inplace, fuse_two_ops); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_ELEMENTWISEBINARYFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h new file mode 100644 index 0000000000..4c1cc94d3d --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/MatMulAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMatMul.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/GEMM.h" +#include "tests/validation/reference/Permute.h" +#include "tests/validation/reference/ReshapeLayer.h" +#include "tests/validation/Validation.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace +{ +template <typename U> +void fill(U &&tensor, int i) +{ + switch (tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } +} + +} // namespace +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuMatMulValidationGenericFixture : public framework::Fixture +{ +public: + void setup(TensorShape lhs_shape, + TensorShape rhs_shape, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + int M0, + int N0, + int K0, + bool export_rhs_to_cl_image, + DataType data_type) + { + //For brevity, the input shapes are assumed to be not-transposed for both a and b matrices. + if (transpose_a) + { + permute(lhs_shape, PermutationVector(1U, 0U)); + } + if (transpose_b) + { + permute(rhs_shape, PermutationVector(1U, 0U)); + } + + // Skip configurations unsupported by the device. + _device_supports_export_to_cl_image = image2d_from_buffer_supported(CLKernelLibrary::get().get_device()); + if (!_device_supports_export_to_cl_image && export_rhs_to_cl_image) + { + ARM_COMPUTE_TEST_INFO("cl_khr_image2d_from_buffer not supported. TEST skipped"); + framework::ARM_COMPUTE_PRINT_INFO(); + return; // Note: Also need to skip the validate in corresponding FIXTURE_DATA_TEST_CASEs. 
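+            // A hedged sketch of the matching guard inside a FIXTURE_DATA_TEST_CASE
+            // body (illustrative, not part of this patch):
+            //   if (!_device_supports_export_to_cl_image && export_rhs_to_cl_image)
+            //   {
+            //       return; // skip validate() too, mirroring this early return
+            //   }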
+ } + + _target = compute_target(lhs_shape, rhs_shape, transpose_a, transpose_b, M0, N0, K0, export_rhs_to_cl_image, + data_type); + _reference = compute_reference(lhs_shape, rhs_shape, output_shape, transpose_a, transpose_b, data_type); + } + +protected: + TensorType compute_target(TensorShape &shape_a, + TensorShape &shape_b, + bool transpose_a, + bool transpose_b, + int M0, + int N0, + int K0, + bool export_rhs_to_cl_image, + DataType data_type) + { + ARM_COMPUTE_UNUSED(export_rhs_to_cl_image); + CLScheduler::get().default_reinit(); + + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape_a, 1, data_type)); + ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape_b, 1, data_type)); + ITensorInfo *dst_info = context.create_tensor_info(); + + MatMulAttributes matmul_attr{}; + matmul_attr.adj_lhs(transpose_a); + matmul_attr.adj_rhs(transpose_b); + + GpuMatMulSettings matmul_settings{}; + matmul_settings.m0(M0); + matmul_settings.n0(N0); + matmul_settings.k0(K0); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info, matmul_attr, matmul_settings); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_lhs{}; + TensorType t_rhs{}; + TensorType t_dst{}; + + // Initialize user tensors + t_lhs.allocator()->init(*lhs_info); + t_rhs.allocator()->init(*rhs_info); + t_dst.allocator()->init(*dst_info); + + ARM_COMPUTE_ASSERT(t_lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(t_rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(t_dst.info()->is_resizable()); + + // Allocate and fill user tensors + t_lhs.allocator()->allocate(); + t_rhs.allocator()->allocate(); + t_dst.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!t_lhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!t_rhs.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!t_dst.info()->is_resizable()); + + fill(AccessorType(t_lhs), 0); + fill(AccessorType(t_rhs), 1); + + // Run runtime + runtime.run({&t_lhs, &t_rhs, &t_dst}); + + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape_a, + const TensorShape &shape_b, + const TensorShape &output_shape, + bool pretranspose_a, + bool pretranspose_b, + DataType data_type) + { + // We collapse dimensions > 3 onto dimension 3, i.e. 
5D+ tensors will look like 3D + // This is necessary unless we choose to extend gemm reference for 5D+ tensors + TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ); + TensorShape shape_a_collapsed = shape_a.collapsed_from(Window::DimZ); + TensorShape shape_b_collapsed = shape_b.collapsed_from(Window::DimZ); + + // Create reference + SimpleTensor<T> a{shape_a_collapsed, data_type, 1}; + SimpleTensor<T> b{shape_b_collapsed, data_type, 1}; + SimpleTensor<T> c{output_shape_collapsed, data_type, 1}; + + // Fill reference + fill(a, 0); + fill(b, 1); + + /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M), + therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K) + in order to be able to call reference implementation that works with (B x M x K) input. + Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */ + + // Define transposed shapes + TensorShape a_transposed_shape(a.shape()); + a_transposed_shape.set(0, a.shape().y()); + a_transposed_shape.set(1, a.shape().x()); + + TensorShape b_transposed_shape(b.shape()); + b_transposed_shape.set(0, b.shape().y()); + b_transposed_shape.set(1, b.shape().x()); + + // Define transposed tensors + SimpleTensor<T> a_transposed{a_transposed_shape, data_type}; + SimpleTensor<T> b_transposed{b_transposed_shape, data_type}; + + //pretranspose a if necessary + if (pretranspose_a) + { + a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U)); + } + + // pretranspose b if necessary + if (pretranspose_b) + { + b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U)); + } + + // Use transposed tensors if boolean enabled else use original tensors + SimpleTensor<T> result = + reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? 
b_transposed : b, c, 1.0f, 0.f); + + // We reshape the gemm output back if the tensor is high dimensional + if (output_shape_collapsed != output_shape) + { + // std::cout << "called reshape: \n"; + result = reference::reshape_layer(result, output_shape); + } + + return result; + } + + CLTensor _target{}; + SimpleTensor<T> _reference{}; + bool _device_supports_export_to_cl_image{false}; + bool _device_supports_mmul{false}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuMatMulValidationFixture + : public DynamicFusionGpuMatMulValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape lhs_shape, + TensorShape rhs_shape, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + int M0, + int N0, + int K0, + bool export_rhs_to_cl_image, + DataType data_type) + { + ARM_COMPUTE_UNUSED(export_rhs_to_cl_image); + DynamicFusionGpuMatMulValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + lhs_shape, rhs_shape, output_shape, transpose_a, transpose_b, M0, N0, K0, + false /* export_rhs_to_cl_image bias */, data_type); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_MATMULKERNELFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h new file mode 100644 index 0000000000..b0c7143d91 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h" + +#include "src/dynamic_fusion/utils/Utils.h" +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/PoolingLayer.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuPool2dValidationGenericFixture : public framework::Fixture +{ +public: + void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type) + { + _target = compute_target(input_shape, pool_attr, data_type); + _reference = compute_reference( + input_shape, convert_pool_attr_to_pool_info(pool_attr, true /* mixed_precision */), data_type); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + switch (tensor.data_type()) + { + case DataType::F16: + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f}; + library->fill(tensor, distribution, i); + break; + } + case DataType::F32: + { + std::uniform_real_distribution<float> distribution(-1.0f, 1.0f); + library->fill(tensor, distribution, i); + break; + } + default: + library->fill_tensor_uniform(tensor, i); + } + } + + // Given input is in nchw format + TensorType compute_target(TensorShape input_shape, const Pool2dAttributes &pool_attr, const DataType data_type) + { + CLScheduler::get().default_reinit(); + + // Change shape due to NHWC data layout, test shapes are NCHW + permute(input_shape, PermutationVector(2U, 0U, 1U)); + + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + auto input_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type, DataLayout::NHWC)); + auto dst_info = context.create_tensor_info(); + + // Create Pool2dSettings + GpuPool2dSettings pool_settings = GpuPool2dSettings(); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, input_info, pool_attr, pool_settings); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + // Construct user tensors + TensorType t_input{}; + TensorType t_dst{}; + + // Initialize user tensors + t_input.allocator()->init(*input_info); + 
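+        // dst_info was created empty and filled in by the sketch during create_op(),
+        // so initialising from it here picks up the inferred output shape automatically.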
t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_input.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_input), 0); + + // Run runtime + runtime.run({&t_input, &t_dst}); + return t_dst; + } + + SimpleTensor<T> compute_reference(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type) + { + // Create reference + SimpleTensor<T> src(shape, data_type, 1, QuantizationInfo()); + // Fill reference + fill(src, 0); + return reference::pooling_layer<T>(src, pool_info, QuantizationInfo(), nullptr, DataLayout::NCHW); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuPool2dValidationFixture + : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, + PoolingType pool_type, + Size2D pool_size, + Padding2D pad, + Size2D stride, + bool exclude_padding, + DataType data_type) + { + DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + input_shape, + Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding( + exclude_padding), + data_type); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuPool2dSpecialValidationFixture + : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape input_shape, Pool2dAttributes pool_attr, DataType data_type) + { + DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + input_shape, pool_attr, data_type); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h new file mode 100644 index 0000000000..c9ffbccbc7 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/ActivationFixture.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H
+
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename... TArgs>
+class DynamicFusionActivationValidationFixture : public framework::Fixture
+{
+public:
+    void setup(TensorShape shape, bool fuse, DataType data_type, ActivationLayerInfo act_info, TArgs... args)
+    {
+        _fuse = fuse;
+        _data_type = data_type;
+        _function = act_info.activation();
+        _target = compute_target(shape, args...);
+        _reference = compute_reference(shape, act_info);
+    }
+
+protected:
+    std::vector<T> get_boundary_values(T min, T max)
+    {
+        // This function will return a vector filled with the following values that can
+        // represent two partitions derived from equivalence partitioning.
+        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
+        const auto delta = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+        const auto center_value = (min + max) / 2;
+        const auto lower_quarter = (min + center_value) / 2;
+        const auto upper_quarter = (center_value + max) / 2;
+
+        std::vector<T> boundary_values{};
+
+        // To ensure all the inserted values are within the given range after subtracting/adding delta
+        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+        {
+            for (auto &v : new_values)
+            {
+                if (v >= min && v <= max)
+                {
+                    boundary_values.emplace_back(v);
+                }
+            }
+        };
+
+        insert_values({min, static_cast<T>(min + delta), static_cast<T>(lower_quarter),
+                       static_cast<T>(center_value - delta)}); // lower partition
+        insert_values({static_cast<T>(center_value), static_cast<T>(center_value + delta),
+                       static_cast<T>(upper_quarter), static_cast<T>(max - delta), max}); // upper partition
+
+        return boundary_values;
+    }
+
+    template <typename U>
+    void fill(U &&tensor)
+    {
+        float min_bound = 0;
+        float max_bound = 0;
+        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
+        library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
+    }
+
+    TensorType compute_target(const TensorShape &shape, TArgs...
args) + { + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext context{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type)); + ITensorInfo *dst_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type)); + + ITensorInfo *ans_0_info = FunctionType::create_op(sketch, src_info, args...); + if (_fuse) + { + ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, args...); + GpuOutput::create_op(sketch, ans_1_info, dst_info); + } + else + { + GpuOutput::create_op(sketch, ans_0_info, dst_info); + } + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // Construct user tensors + TensorType t_src{}; + TensorType t_dst{}; + + // Initialize user tensors + t_src.allocator()->init(*src_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_src.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_src)); + + // Run runtime + runtime.run({&t_src, &t_dst}); + + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info) + { + // Create reference + SimpleTensor<T> src{shape, _data_type, 1}; + + // Fill reference + fill(src); + + auto tmp = reference::activation_layer<T>(src, act_info); + + if (_fuse) + { + auto dst = reference::activation_layer<T>(tmp, act_info); + return dst; + } + else + { + return tmp; + } + } + +protected: + ActivationLayerInfo::ActivationFunction _function{}; + bool _fuse{false}; + DataType _data_type{}; + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionSigmoidValidationFixture + : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, bool fuse, DataType data_type) + { + ActivationLayerInfo act_info{ActivationLayerInfo::ActivationFunction::LOGISTIC}; + DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse, + data_type, act_info); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionTanhValidationFixture + : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, bool fuse, DataType data_type) + { + ActivationLayerInfo act_info{ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f}; + DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse, + data_type, act_info); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h new file mode 100644 index 0000000000..08fffb305b --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. 
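The _fuse branch of compute_target() above encodes the central contract of the sketch API: every create_op returns the ITensorInfo of a virtual intermediate result, which can be fed straight into the next operator, and only GpuOutput::create_op binds a result to a user-visible tensor. A condensed sketch of the two-op path, assuming GpuSigmoid as the concrete FunctionType (the operator header path is an assumption):

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.h" // assumed operator header

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void sketch_two_fused_sigmoids(const TensorShape &shape)
{
    CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext context{&cl_compile_ctx};
    GpuWorkloadSketch sketch{&context};

    ITensorInfo *src = context.create_tensor_info(TensorInfo(shape, 1, DataType::F32));
    ITensorInfo *dst = context.create_tensor_info(TensorInfo(shape, 1, DataType::F32));

    ITensorInfo *ans_0 = GpuSigmoid::create_op(sketch, src);   // virtual result of the first op
    ITensorInfo *ans_1 = GpuSigmoid::create_op(sketch, ans_0); // second op fused onto the first
    GpuOutput::create_op(sketch, ans_1, dst);                  // materialise the end of the chain

    ClWorkloadRuntime runtime;
    runtime.configure(sketch); // compiles the fused chain into one workload
}

The unfused path simply skips the second create_op; the reference side mirrors the choice by applying reference::activation_layer once or twice.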
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/DepthConvertLayer.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2> +class DynamicFusionCastValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy) + { + _target = compute_target(shape, dt_in, dt_out, policy); + _reference = compute_reference(shape, dt_in, dt_out, policy); + } + +protected: + template <typename U> + void fill(U &&tensor, int i, DataType dt_in, DataType dt_out) + { + // Restricting range to avoid inf values + if (dt_out == DataType::F16) + { + constexpr int signed_min = -32000; + constexpr int signed_max = 32000; + constexpr int unsigned_min = 0; + constexpr int unsigned_max = 65000; + + switch (dt_in) + { + case DataType::U8: + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + case DataType::S8: + case DataType::F32: + { + library->fill_tensor_uniform(tensor, i); + break; + } + case DataType::U16: + { + library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min), + static_cast<uint16_t>(unsigned_max)); + break; + } + case DataType::S16: + { + library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min), + static_cast<int16_t>(signed_max)); + break; + } + case DataType::U32: + { + library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min), + static_cast<uint32_t>(unsigned_max)); + break; + } + case DataType::S32: + { + library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), + static_cast<int32_t>(signed_max)); + break; + } + default: + ARM_COMPUTE_ERROR("NOT SUPPORTED!"); + 
} + } + else + { + library->fill_tensor_uniform(tensor, i); + } + } + + // Given input is in nchw format + TensorType + compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy) + { + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + // Here, we use DataLayout::NCHW just for the test. However, the optimal data layout to + // be used with dynamic fusion is NHWC + ITensorInfo *src_info = + context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important + ITensorInfo *dst_info = context.create_tensor_info(); + + CastAttributes attributes; + attributes.convert_policy(policy).data_type(dt_out); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_src{}; + TensorType t_dst{}; + + // Initialize user tensors + t_src.allocator()->init(*src_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_src.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_src), 0, dt_in, dt_out); + + // Run runtime + runtime.run({&t_src, &t_dst}); + return t_dst; + } + + SimpleTensor<T2> + compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy) + { + // Create reference + SimpleTensor<T1> src{shape, dt_in, 1}; + + // Fill reference + fill(src, 0, dt_in, dt_out); + + return reference::depth_convert<T1, T2>(src, dt_out, policy, 0); + } + + TensorType _target{}; + SimpleTensor<T2> _reference{}; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h new file mode 100644 index 0000000000..e8f6f83e42 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
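The bounds in the cast fixture's fill() come straight from the IEEE-754 binary16 format: its largest finite value is 65504, so wide integer sources are restricted to [-32000, 32000] (signed) or [0, 65000] (unsigned) to keep every value finite and representable after conversion to F16. A dependency-free check of the headroom those bounds leave:

#include <cstdint>
#include <iostream>

int main()
{
    // Largest finite IEEE-754 binary16 (F16) value.
    constexpr float f16_max = 65504.0f;

    // Bounds used by the fixture's fill() for wide signed and unsigned sources.
    constexpr int32_t signed_max   = 32000;
    constexpr int32_t unsigned_max = 65000;

    std::cout << "signed headroom:   " << f16_max - signed_max << '\n';   // 33504
    std::cout << "unsigned headroom: " << f16_max - unsigned_max << '\n'; // 504
    return 0;
}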
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/framework/Fixture.h" +#include "tests/validation/reference/ActivationLayer.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionClampValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape, ClampAttributes attributes, bool fuse, DataType data_type) + { + // CLAMP is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped. + ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, attributes.max_val(), attributes.min_val() }; + + _fuse = fuse; + _attributes = attributes; + _data_type = data_type; + _target = compute_target(shape, attributes); + _reference = compute_reference(shape, act_info); + } + +protected: + std::vector<T> get_boundary_values(T min, T max) + { + // This function will return a vector filled with the following values that can + // represent two partitions derived from equivalence partitioning. + // * Lower partition: min, min + delta, lower quarter (nominal), center - delta + // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max + const auto delta = is_data_type_float(_data_type) ?
T(0.1f) : T(1); + const auto center_value = (min + max) / 2; + const auto lower_quarter = (min + center_value) / 2; + const auto upper_quarter = (center_value + max) / 2; + + std::vector<T> boundary_values{}; + + // To ensure all the inserted values are within the given range after subtracting/adding delta + auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values) + { + for(auto &v : new_values) + { + if(v >= min && v <= max) + { + boundary_values.emplace_back(v); + } + } + }; + + insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition + insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition + + return boundary_values; + } + + template <typename U> + void fill(U &&tensor) + { + float min_bound = 0; + float max_bound = 0; + std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, _data_type); + library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound))); + } + + TensorType compute_target(const TensorShape &shape, ClampAttributes attributes) + { + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext context{ &cl_compile_ctx }; + GpuWorkloadSketch sketch{ &context }; + + // Create sketch tensors + ITensorInfo* src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type)); + ITensorInfo* dst_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type)); + + ITensorInfo *ans_0_info = FunctionType::create_op(sketch, src_info, attributes); + if(_fuse) + { + ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, attributes); + GpuOutput::create_op(sketch, ans_1_info, dst_info); + } + else + { + GpuOutput::create_op(sketch, ans_0_info, dst_info); + } + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // Construct user tensors + TensorType t_src{}; + TensorType t_dst{}; + + // Initialize user tensors + t_src.allocator()->init(*src_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_src.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_src)); + + // Run runtime + runtime.run({ &t_src, &t_dst }); + + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info) + { + // Create reference + SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info }; + + // Fill reference + fill(src); + + auto dst = reference::activation_layer<T>(src, act_info, _quantization_info); + return dst; + } + +protected: + QuantizationInfo _quantization_info{}; + ClampAttributes _attributes{}; + bool _fuse{ false }; + DataType _data_type{}; + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h new file mode 100644 index 0000000000..f02aa5e36a --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/MulFixture.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2023-2024 Arm Limited.
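The comment at the top of the clamp fixture's setup() ("CLAMP is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped") follows directly from the activation's definition, f(x) = min(a, max(b, x)): alpha carries the clamp's upper bound and beta its lower bound, which is why act_info receives max_val() before min_val(). A standalone check of that identity in plain C++:

#include <algorithm>
#include <cassert>

// LU_BOUNDED_RELU as defined by the activation layer: min(alpha, max(beta, x)).
static float lu_bounded_relu(float x, float alpha, float beta)
{
    return std::min(alpha, std::max(beta, x));
}

int main()
{
    const float min_val = -1.0f; // ClampAttributes::min_val() equivalent
    const float max_val = 2.0f;  // ClampAttributes::max_val() equivalent

    // CLAMP(x) == LU_BOUNDED_RELU(x) with alpha = max_val and beta = min_val.
    for (float x : {-5.0f, -1.0f, 0.5f, 2.0f, 9.0f})
    {
        assert(std::clamp(x, min_val, max_val) == lu_bounded_relu(x, max_val, min_val));
    }
    return 0;
}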
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/Globals.h" +#include "tests/validation/reference/PixelWiseMultiplication.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/* We use a separate test fixture for the Multiplication op instead of reusing ElementwiseBinaryFixture, to avoid + * exposing the internal enum ElementwiseOp in the public utils/TypePrinters.h, which the data test case macros + * would otherwise require in order to print the test data.
+ */ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionMulValidationFixture : public framework::Fixture +{ +public: + void setup(const TensorShape &shape0, + const TensorShape &shape1, + const TensorShape &shape2, + DataType data_type, + bool is_inplace, + bool fuse_two_ops = false) + { + _data_type = data_type; + _is_inplace = is_inplace; + _fuse = fuse_two_ops; + ARM_COMPUTE_ERROR_ON_MSG(_fuse && shape2.total_size() == 0, "No shape2 provided for fusion of two ops."); + ARM_COMPUTE_ERROR_ON_MSG(_fuse && _is_inplace, "In place for fusing case not supported yet."); + _target = compute_target(shape0, shape1, shape2); + _reference = compute_reference(shape0, shape1, shape2); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + library->fill_tensor_uniform(tensor, i); + } + + TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2) + { + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Fuse first multiplication op + ITensorInfo *lhs_info = context.create_tensor_info(TensorInfo(shape0, 1, _data_type)); + ITensorInfo *rhs_info = context.create_tensor_info(TensorInfo(shape1, 1, _data_type)); + ITensorInfo *dst_info = context.create_tensor_info(); + + ITensorInfo *rhs_info_fuse = nullptr; + + ITensorInfo *ans_info = FunctionType::create_op(sketch, lhs_info, rhs_info); + + if (_fuse) + { + rhs_info_fuse = context.create_tensor_info(TensorInfo(shape2, 1, _data_type)); + ITensorInfo *ans2_info = FunctionType::create_op(sketch, ans_info, rhs_info_fuse); + GpuOutput::create_op(sketch, ans2_info, dst_info); + } + else + { + GpuOutput::create_op(sketch, ans_info, dst_info); + } + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_lhs{}; + TensorType t_rhs{}; + TensorType t_rhs_fuse{}; + TensorType t_dst{}; + + // Initialize user tensors + t_lhs.allocator()->init(*lhs_info); + t_rhs.allocator()->init(*rhs_info); + t_dst.allocator()->init(*dst_info); + if (_fuse) + { + t_rhs_fuse.allocator()->init(*rhs_info_fuse); + } + + // Allocate and fill user tensors + // Instead of using ACL allocator, the user can choose to import memory into the tensors + t_lhs.allocator()->allocate(); + t_rhs.allocator()->allocate(); + t_dst.allocator()->allocate(); + if (_fuse) + { + t_rhs_fuse.allocator()->allocate(); + } + + fill(AccessorType(t_lhs), 0); + fill(AccessorType(t_rhs), 1); + if (_fuse) + { + fill(AccessorType(t_rhs_fuse), 2); + } + + // Run runtime + if (_fuse) + { + runtime.run({&t_lhs, &t_rhs, &t_rhs_fuse, &t_dst}); + } + else + { + runtime.run({&t_lhs, &t_rhs, &t_dst}); + } + + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, const TensorShape &shape2) + { + // Create reference + SimpleTensor<T> ref_lhs{shape0, _data_type, 1, QuantizationInfo()}; + SimpleTensor<T> ref_rhs{shape1, _data_type, 1, QuantizationInfo()}; + 
SimpleTensor<T> ref_rhs_fuse{shape2, _data_type, 1, QuantizationInfo()}; + + // Fill reference + fill(ref_lhs, 0); + fill(ref_rhs, 1); + SimpleTensor<T> ref_dst = reference::pixel_wise_multiplication<T, T, T>( + ref_lhs, ref_rhs, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, _data_type, + QuantizationInfo()); + if (_fuse) + { + fill(ref_rhs_fuse, 2); + SimpleTensor<T> ref_dst_fuse = reference::pixel_wise_multiplication<T, T, T>( + ref_dst, ref_rhs_fuse, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, _data_type, + QuantizationInfo()); + return ref_dst_fuse; + } + return ref_dst; + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + DataType _data_type{}; + bool _is_inplace{false}; + bool _fuse{false}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionMulOneOpValidationFixture + : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape0, DataType data_type, bool is_inplace) + { + DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape0, shape0, TensorShape(), data_type, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionMulBroadcastValidationFixture + : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type, bool is_inplace) + { + DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape0, shape1, TensorShape(), data_type, is_inplace); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionMulTwoOpsValidationFixture + : public DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(const TensorShape &shape0, + const TensorShape &shape1, + const TensorShape &shape2, + DataType data_type, + bool is_inplace, + bool fuse_two_ops) + { + DynamicFusionMulValidationFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape0, shape1, shape2, data_type, is_inplace, fuse_two_ops); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_MULFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h new file mode 100644 index 0000000000..bde3360940 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
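On the reference side of the multiplication fixture, fusion is emulated by simply running pixel_wise_multiplication twice, so for float data the expected output is (lhs * rhs) * rhs_fuse elementwise; the SATURATE and TO_NEAREST_UP policies only come into play for integer types. A dependency-free sketch of that reference chain:

#include <cstddef>
#include <vector>

// Elementwise multiplication at scale 1.f -- the float path of the reference above.
static std::vector<float> mul(const std::vector<float> &a, const std::vector<float> &b)
{
    std::vector<float> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        out[i] = a[i] * b[i];
    }
    return out;
}

int main()
{
    const std::vector<float> lhs{1.f, 2.f, 3.f};
    const std::vector<float> rhs{4.f, 5.f, 6.f};
    const std::vector<float> rhs_fuse{0.5f, 0.5f, 0.5f};

    // Unfused: one multiplication. Fused: the first result feeds the second op.
    const std::vector<float> ref_dst      = mul(lhs, rhs);          // {4, 10, 18}
    const std::vector<float> ref_dst_fuse = mul(ref_dst, rhs_fuse); // {2, 5, 9}
    return 0;
}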
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/ReshapeAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h" + +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/Globals.h" +#include "tests/validation/reference/ReshapeLayer.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionGpuReshapeLayerValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape input_shape, TensorShape output_shape, DataType data_type) + { + _target = compute_target(input_shape, output_shape, data_type); + _reference = compute_reference(input_shape, output_shape, data_type); + } + +protected: + template <typename U> + void fill(U &&tensor, int i) + { + library->fill_tensor_uniform(tensor, i); + } + + TensorType compute_target(TensorShape &input_shape, TensorShape &output_shape, DataType data_type) + { + // Check that the input shape can indeed be reshaped to the output one + ARM_COMPUTE_ASSERT(input_shape.total_size() == output_shape.total_size()); + + // Create a new workload sketch + auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + auto context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *src_info = context.create_tensor_info(TensorInfo(input_shape, 1, data_type)); + ITensorInfo *dst_info = context.create_tensor_info(TensorInfo(output_shape, 1, data_type)); + ReshapeAttributes attributes; + attributes.shape(output_shape); + + ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes); + GpuOutput::create_op(sketch, ans_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_src{}; + TensorType t_dst{}; + // Initialize user tensors + t_src.allocator()->init(*src_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors
+ t_src.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_src), 0); + + // Run runtime + runtime.run({&t_src, &t_dst}); + + return t_dst; + } + + SimpleTensor<T> + compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, DataType data_type) + { + // Create reference + SimpleTensor<T> src{input_shape, data_type}; + + // Fill reference + fill(src, 0); + + return reference::reshape_layer<T>(src, output_shape); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; +/** [ReshapeLayer fixture] **/ +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESHAPEFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h new file mode 100644 index 0000000000..711767b66f --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2022-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
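The ARM_COMPUTE_ASSERT at the top of the reshape fixture's compute_target() is the operator's entire legality condition: the element count must be preserved, while the rank and the per-dimension extents are free to change. A small sketch of shapes that do and do not qualify:

#include "arm_compute/core/TensorShape.h"

using arm_compute::TensorShape;

int main()
{
    const TensorShape in(8U, 4U, 2U);  // 64 elements
    const TensorShape ok(16U, 4U);     // 64 elements -> a valid reshape target
    const TensorShape bad(8U, 4U, 3U); // 96 elements -> rejected by the assert

    const bool valid = in.total_size() == ok.total_size();  // true
    const bool wrong = in.total_size() == bad.total_size(); // false
    (void)valid;
    (void)wrong;
    return 0;
}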
+ */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H + +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/ResizeAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/SimpleTensor.h" +#include "tests/validation/reference/Permute.h" +#include "tests/validation/reference/Scale.h" +#include "tests/validation/Validation.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionResizeGenericValidationFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape, + DataType data_type, + QuantizationInfo quantization_info, + DataLayout data_layout, + InterpolationPolicy interpolation_policy, + SamplingPolicy sampling_policy, + bool align_corners, + QuantizationInfo output_quantization_info) + { + _shape = shape; + _interpolation_policy = interpolation_policy; + _sampling_policy = sampling_policy; + _data_type = data_type; + _input_quantization_info = quantization_info; + _output_quantization_info = output_quantization_info; + _align_corners = align_corners; + _data_layout = data_layout; + + ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NHWC); // Dynamic fusion resize supports only NHWC layout + + generate_scale(shape); + + std::mt19937 generator(library->seed()); + std::uniform_int_distribution<uint32_t> distribution_u8(0, 255); + + _target = compute_target(shape); + _reference = compute_reference(shape); + } + +protected: + void generate_scale(const TensorShape &shape) + { + static constexpr float _min_scale{0.25f}; + static constexpr float _max_scale{3.f}; + + constexpr float max_width{8192.0f}; + constexpr float max_height{6384.0f}; + constexpr float min_width{1.f}; + constexpr float min_height{1.f}; + + std::mt19937 generator(library->seed()); + std::uniform_real_distribution<float> distribution_float(_min_scale, _max_scale); + + auto generate = [&](size_t input_size, float min_output, float max_output) -> int + { + const float generated_scale = distribution_float(generator); + const int output_size = static_cast<int>( + utility::clamp(static_cast<float>(input_size) * generated_scale, min_output, max_output)); + return output_size; + }; + + // Input shape is always given in NCHW layout. 
NHWC is handled by a permute in compute_target() + const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT); + + _output_width = generate(shape[idx_width], min_width, max_width); + _output_height = generate(shape[idx_height], min_height, max_height); + } + + template <typename U> + void fill(U &&tensor) + { + if (tensor.data_type() == DataType::F32) + { + std::uniform_real_distribution<float> distribution(-5.0f, 5.0f); + library->fill(tensor, distribution, 0); + } + else if (tensor.data_type() == DataType::F16) + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-5.0f, 5.0f}; + library->fill(tensor, distribution, 0); + } + else if (is_data_type_quantized(tensor.data_type())) + { + std::uniform_int_distribution<> distribution(0, 100); + library->fill(tensor, distribution, 0); + } + else + { + library->fill_tensor_uniform(tensor, 0); + } + } + + TensorType compute_target(TensorShape shape) + { + // Our test shapes are assumed in NCHW data layout, thus the permutation + permute(shape, PermutationVector(2U, 0U, 1U)); + + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + // Create sketch tensors + ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type, _data_layout)); + src_info->set_quantization_info(_input_quantization_info); + ITensorInfo *dst_info = context.create_tensor_info(); + + ResizeAttributes attributes; + attributes.align_corners(_align_corners) + .sampling_policy(_sampling_policy) + .interpolation_policy(_interpolation_policy) + .output_width(_output_width) + .output_height(_output_height); + + ITensorInfo *scale_result_info = FunctionType::create_op(sketch, src_info, attributes); + GpuOutput::create_op(sketch, scale_result_info, dst_info); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + + // Construct user tensors + TensorType t_src{}; + TensorType t_dst{}; + + // Initialize user tensors + t_src.allocator()->init(*src_info); + t_dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + t_src.allocator()->allocate(); + t_dst.allocator()->allocate(); + + fill(AccessorType(t_src)); + + // Run runtime + runtime.run({&t_src, &t_dst}); + + return t_dst; + } + + SimpleTensor<T> compute_reference(const TensorShape &shape) + { + // Create reference + SimpleTensor<T> src{shape, _data_type, 1, _input_quantization_info}; + + // Reference code is NCHW, so the input shapes are NCHW + const int idx_width = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::HEIGHT); + + const float scale_x = static_cast<float>(_output_width) / shape[idx_width]; + const float scale_y = static_cast<float>(_output_height) / shape[idx_height]; + + // Fill reference + fill(src); + + return
reference::scale<T>(src, scale_x, scale_y, _interpolation_policy, BorderMode::REPLICATE, + static_cast<T>(0), _sampling_policy, /* ceil_policy_scale */ false, _align_corners, + _output_quantization_info); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; + TensorShape _shape{}; + InterpolationPolicy _interpolation_policy{}; + SamplingPolicy _sampling_policy{}; + DataType _data_type{}; + DataLayout _data_layout{}; + QuantizationInfo _input_quantization_info{}; + QuantizationInfo _output_quantization_info{}; + bool _align_corners{false}; + int _output_width{0}; + int _output_height{0}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionResizeValidationFixture + : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, + DataType data_type, + DataLayout data_layout, + InterpolationPolicy policy, + SamplingPolicy sampling_policy, + bool align_corners) + { + DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape, data_type, QuantizationInfo(), data_layout, policy, sampling_policy, align_corners, + QuantizationInfo()); + } +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false> +class DynamicFusionResizeQuantizedValidationFixture + : public DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, + DataType data_type, + QuantizationInfo quantization_info, + DataLayout data_layout, + InterpolationPolicy policy, + SamplingPolicy sampling_policy, + bool align_corners) + { + DynamicFusionResizeGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape, data_type, quantization_info, data_layout, policy, sampling_policy, align_corners, + quantization_info); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_RESIZEFIXTURE_H diff --git a/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h new file mode 100644 index 0000000000..175d4ff889 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023-2024 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
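generate_scale() above decouples the resize tests from fixed output sizes: it draws a per-run scale factor in [0.25, 3], applies it to each spatial axis, and clamps the result to the supported output range, after which compute_reference() recovers the effective ratios from the clamped sizes. A dependency-free sketch of the same arithmetic, with std::clamp standing in for utility::clamp and a fixed seed in place of library->seed():

#include <algorithm>
#include <cstddef>
#include <random>

// Mirrors generate_scale(): scale an input extent and clamp the output extent
// to the fixture's supported range.
static int generate_output_size(std::size_t input_size, float min_out, float max_out, std::mt19937 &gen)
{
    std::uniform_real_distribution<float> dist(0.25f, 3.f);
    const float scaled = static_cast<float>(input_size) * dist(gen);
    return static_cast<int>(std::clamp(scaled, min_out, max_out));
}

int main()
{
    std::mt19937 gen(42U); // fixed seed for the sketch
    const std::size_t in_w = 100, in_h = 50;

    const int out_w = generate_output_size(in_w, 1.f, 8192.f, gen); // width range from the fixture
    const int out_h = generate_output_size(in_h, 1.f, 6384.f, gen); // height range from the fixture

    // The reference is then driven by the effective ratios, as in compute_reference().
    const float scale_x = static_cast<float>(out_w) / in_w;
    const float scale_y = static_cast<float>(out_h) / in_h;
    (void)scale_x;
    (void)scale_y;
    return 0;
}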
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H +#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H + +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" + +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/SimpleTensor.h" +#include "tests/validation/reference/SoftmaxLayer.h" +#include "tests/validation/Validation.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionSoftmaxValidationGenericFixture : public framework::Fixture +{ +public: + void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log) + { + _reference = compute_reference(shape, data_type, beta, axis, is_log); + _target = compute_target(shape, data_type, beta, axis, is_log); + } + +protected: + template <typename U> + void fill(U &&tensor) + { + if (tensor.data_type() == DataType::F32) + { + std::uniform_real_distribution<float> distribution(-10.0f, 10.0f); + library->fill(tensor, distribution, 0); + } + else if (tensor.data_type() == DataType::F16) + { + arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-10.0f, 10.0f}; + library->fill(tensor, distribution, 0); + } + else if (!is_data_type_quantized(tensor.data_type())) + { + std::uniform_int_distribution<> distribution(0, 100); + library->fill(tensor, distribution, 0); + } + else + { + library->fill_tensor_uniform(tensor, 0); + } + } + + TensorType compute_target(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log) + { + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext context = GpuWorkloadContext{&cl_compile_ctx}; + GpuWorkloadSketch sketch{&context}; + + SoftmaxAttributes softmax_attr{}; + softmax_attr.axis(axis).beta(beta).is_log_softmax(is_log); + ITensorInfo *src_info = context.create_tensor_info(shape, 1, data_type); + ITensorInfo *dst_info = context.create_tensor_info(shape, 1, data_type); + FunctionType::create_op(sketch, src_info, dst_info, softmax_attr); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + // Instead of using ACL allocated memory, the user can choose to import memory into the tensors + for (auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + // Construct user tensors + TensorType src{}; + TensorType dst{}; + + // Initialize user tensors + src.allocator()->init(*src_info); + dst.allocator()->init(*dst_info); + + // Allocate and fill user tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + 
fill(AccessorType(src)); + + // Run runtime + runtime.run({&src, &dst}); + + return dst; + } + + SimpleTensor<T> + compute_reference(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log) + { + // Create reference + SimpleTensor<T> src{shape, data_type, 1}; + + // Fill reference + fill(src); + + return reference::softmax_layer<T>(src, beta, axis, is_log); + } + + TensorType _target{}; + SimpleTensor<T> _reference{}; +}; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class DynamicFusionSoftmaxValidationFixture + : public DynamicFusionSoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T> +{ +public: + void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log) + { + DynamicFusionSoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup( + shape, data_type, beta, axis, is_log); + } +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE_H
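Two details set the softmax fixture apart from the rest of this set: FunctionType::create_op takes both the source and destination infos directly instead of routing the result through GpuOutput, and the workload is expected to request auxiliary tensors (intermediates such as per-axis max and sum), which must be backed with memory before run(). A condensed usage sketch, assuming GpuSoftmax is the FunctionType this fixture is instantiated with (the operator header path is an assumption):

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h" // assumed operator header
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void run_softmax(const TensorShape &shape)
{
    CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext context{&cl_compile_ctx};
    GpuWorkloadSketch sketch{&context};

    SoftmaxAttributes softmax_attr{};
    softmax_attr.axis(0).beta(1.f).is_log_softmax(false);

    ITensorInfo *src_info = context.create_tensor_info(shape, 1, DataType::F32);
    ITensorInfo *dst_info = context.create_tensor_info(shape, 1, DataType::F32);
    GpuSoftmax::create_op(sketch, src_info, dst_info, softmax_attr); // no GpuOutput step needed here

    ClWorkloadRuntime runtime;
    runtime.configure(sketch);

    // Back the auxiliary tensors before running, exactly as in the fixture above.
    for (auto &data : runtime.get_auxiliary_tensors())
    {
        CLTensor *tensor = std::get<0>(data);
        TensorInfo info = std::get<1>(data);
        AuxMemoryInfo aux_mem_req = std::get<2>(data);
        tensor->allocator()->init(info, aux_mem_req.alignment);
        tensor->allocator()->allocate();
    }

    CLTensor src{};
    CLTensor dst{};
    src.allocator()->init(*src_info);
    dst.allocator()->init(*dst_info);
    src.allocator()->allocate();
    dst.allocator()->allocate();

    runtime.run({&src, &dst});
}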