Diffstat (limited to 'tests/validation/fixtures/DeconvolutionLayerFixture.h')
 tests/validation/fixtures/DeconvolutionLayerFixture.h | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 14f071eed0..83170c413c 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,6 @@ public:
using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type;

public:
- template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
DataType data_type, DataType weights_data_type, DataLayout data_layout,
QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias)
@@ -75,14 +74,14 @@ protected:
case DataType::QASYMM8:
{
std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, i);
break;
}
case DataType::QASYMM8_SIGNED:
{
std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
- std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
library->fill(tensor, distribution, i);
break;
}
@@ -102,7 +101,7 @@ protected:
max_bound = bounds.second;
}
}
- std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
library->fill(tensor, distribution, i);
break;
}
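Note on the three distribution changes above: they widen the template argument of std::uniform_int_distribution from 8-bit to 32-bit integer types. The standard only defines uniform_int_distribution for short, int, long, long long and their unsigned counterparts, so instantiating it with int8_t or uint8_t is undefined behaviour and breaks on some standard library implementations. A minimal sketch of the pattern, using a hypothetical sample_qasymm8() helper (the real fixture hands the distribution to library->fill(), which does the narrowing internally):

#include <cstdint>
#include <random>

// Hypothetical helper mirroring the fixture's QASYMM8 fill: sample through a
// 32-bit distribution (well-defined by the standard) and narrow afterwards.
// The bounds would normally come from get_quantized_bounds(); they are fixed
// here purely for illustration.
static uint8_t sample_qasymm8(std::mt19937 &gen, uint32_t lo, uint32_t hi)
{
    std::uniform_int_distribution<uint32_t> distribution(lo, hi);
    return static_cast<uint8_t>(distribution(gen)); // lo/hi never exceed 255, so the cast is lossless
}

int main()
{
    std::mt19937 gen(0);
    const uint8_t value = sample_qasymm8(gen, 0, 255);
    static_cast<void>(value); // silence unused-variable warnings
    return 0;
}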
@@ -246,11 +245,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -267,11 +264,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top,
unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL);
@@ -288,11 +283,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, T>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -310,12 +303,10 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class DeconvolutionValidationQuantizedPerChannelFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T, TW>
{
public:
- template <typename...>
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias,
DataType weights_data_type)
{
- ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
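The remaining hunks all make the same two changes per fixture: setup() loses its template <typename...> head, and the square-kernel assertion is dropped (kernel_size_x and kernel_size_y appear to be compile-time template parameters of these fixtures, so the check never varied at run time). A minimal sketch of what removing the variadic template head amounts to, using a hypothetical cut-down fixture rather than the real Compute Library class:

#include <iostream>

// Hypothetical cut-down fixture: before the change, setup() was declared as a
// variadic function template even though the parameter pack was never used.
struct LegacyFixture
{
    template <typename...>
    void setup(unsigned int stride_x, unsigned int stride_y)
    {
        std::cout << "legacy setup " << stride_x << 'x' << stride_y << '\n';
    }
};

// After the change it is a plain member function with the same signature.
struct Fixture
{
    void setup(unsigned int stride_x, unsigned int stride_y)
    {
        std::cout << "setup " << stride_x << 'x' << stride_y << '\n';
    }
};

int main()
{
    LegacyFixture{}.setup(2, 2); // the empty template argument pack is deduced
    Fixture{}.setup(2, 2);
    return 0;
}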