diff options
Diffstat (limited to 'tests/validation/fixtures')
-rw-r--r-- | tests/validation/fixtures/QuantizationLayerFixture.h | 65 |
1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h index 8590b7193b..65de405788 100644 --- a/tests/validation/fixtures/QuantizationLayerFixture.h +++ b/tests/validation/fixtures/QuantizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -103,6 +103,69 @@ protected: TensorType _target{}; SimpleTensor<uint8_t> _reference{}; }; + +template <typename TensorType, typename AccessorType, typename FunctionType, typename T> +class QAsymm8QuantizationValidationFixture : public framework::Fixture +{ +public: + template <typename...> + void setup(TensorShape shape, DataType data_type, QuantizationInfo quant_info) + { + _target = compute_target(shape, data_type, quant_info); + _reference = compute_reference(shape, data_type, quant_info); + } + +protected: + template <typename U> + void fill(U &&tensor) + { + library->fill_tensor_uniform(tensor, 0); + } + + TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info) + { + // Create tensors + TensorType src = create_tensor<TensorType>(shape, data_type); + TensorType dst = create_tensor<TensorType>(shape, DataType::QASYMM8, 1, quant_info); + + // Create and configure function + FunctionType quantization_layer; + quantization_layer.configure(&src, &dst); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensors + fill(AccessorType(src)); + + // Compute function + quantization_layer.run(); + + return dst; + } + + SimpleTensor<uint8_t> 
compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info) + { + // Create reference + SimpleTensor<T> src{ shape, data_type }; + + // Fill reference + fill(src); + + return reference::quantization_layer<T>(src, quant_info); + } + + TensorType _target{}; + SimpleTensor<uint8_t> _reference{}; +}; + } // namespace validation } // namespace test } // namespace arm_compute |