Diffstat (limited to 'tests/validation/fixtures/DequantizationLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h | 106
1 file changed, 70 insertions(+), 36 deletions(-)
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 7543eb2d2e..28d43cf754 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -49,11 +49,8 @@ public:
template <typename...>
void setup(TensorShape shape, DataType data_type)
{
- // Initialize random min and max values
- rand_min_max(&_min, &_max);
-
- _target = compute_target(shape, data_type, _min, _max);
- _reference = compute_reference(shape, data_type, _min, _max);
+ _target = compute_target(shape, data_type);
+ _reference = compute_reference(shape, data_type);
}
protected:
@@ -63,28 +60,80 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type, float min, float max)
+ template <typename U>
+ void fill_min_max(U &&tensor)
+ {
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+
+ Window window;
+
+ window.set(0, Window::Dimension(0, tensor.shape()[0], 2));
+
+ for(unsigned int d = 1; d < tensor.shape().num_dimensions(); ++d)
+ {
+ window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
+ }
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const float n1 = distribution(gen);
+ const float n2 = distribution(gen);
+
+ float min = 0.0f;
+ float max = 0.0f;
+
+ if(n1 < n2)
+ {
+ min = n1;
+ max = n2;
+ }
+ else
+ {
+ min = n2;
+ max = n1;
+ }
+
+ auto out_ptr = reinterpret_cast<float *>(tensor(id));
+ out_ptr[0] = min;
+ out_ptr[1] = max;
+ });
+ }
+
+ TensorType compute_target(const TensorShape &shape, DataType data_type)
{
+ TensorShape shape_min_max = shape;
+ shape_min_max.set(Window::DimX, 2);
+
+ // Remove Y and Z dimensions and keep the batches
+ shape_min_max.remove_dimension(1);
+ shape_min_max.remove_dimension(1);
+
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, DataType::F32);
+ TensorType src = create_tensor<TensorType>(shape, data_type);
+ TensorType dst = create_tensor<TensorType>(shape, DataType::F32);
+ TensorType min_max = create_tensor<TensorType>(shape_min_max, DataType::F32);
// Create and configure function
FunctionType dequantization_layer;
- dequantization_layer.configure(&src, &dst, &min, &max);
+ dequantization_layer.configure(&src, &dst, &min_max);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
+ min_max.allocator()->allocate();
ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
fill(AccessorType(src));
+ fill_min_max(AccessorType(min_max));
// Compute function
dequantization_layer.run();
@@ -92,43 +141,28 @@ protected:
return dst;
}
- SimpleTensor<float> compute_reference(const TensorShape &shape, DataType data_type, float min, float max)
+ SimpleTensor<float> compute_reference(const TensorShape &shape, DataType data_type)
{
+ TensorShape shape_min_max = shape;
+ shape_min_max.set(Window::DimX, 2);
+
+ // Remove Y and Z dimensions and keep the batches
+ shape_min_max.remove_dimension(1);
+ shape_min_max.remove_dimension(1);
+
// Create reference
- SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<float> min_max{ shape_min_max, data_type };
// Fill reference
fill(src);
+ fill_min_max(min_max);
- return reference::dequantization_layer<T>(src, min, max);
- }
-
- /** Generate random constant values to be used as min and max for dequantization.
- */
- void rand_min_max(float *min, float *max)
- {
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<float> distribution(-10000.0, 10000.0);
-
- const float n1 = distribution(gen);
- const float n2 = distribution(gen);
-
- if(n1 < n2)
- {
- *min = n1;
- *max = n2;
- }
- else
- {
- *min = n2;
- *max = n1;
- }
+ return reference::dequantization_layer<T>(src, min_max);
}
TensorType _target{};
SimpleTensor<float> _reference{};
- float _min = 0.f;
- float _max = 0.f;
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
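Note: with this change the fixture no longer draws a single scalar (min, max) pair; both target and reference consume a min_max tensor of shape (2, batches), one pair per batch, filled by fill_min_max through a window that steps dimension X by 2. The standalone sketch below illustrates the per-batch mapping the test now exercises, assuming the usual u8 min/max dequantization out = min + (in / 255) * (max - min); the helper name and the sample values are illustrative only and are not part of the library.

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Hypothetical helper (not a library API): dequantize one batch of u8
    // values with that batch's (min, max) pair, assuming the linear
    // mapping value / 255 * (max - min) + min.
    std::vector<float> dequantize_batch(const std::vector<uint8_t> &src, float min, float max)
    {
        std::vector<float> dst(src.size());
        for(size_t i = 0; i < src.size(); ++i)
        {
            dst[i] = min + (static_cast<float>(src[i]) / 255.0f) * (max - min);
        }
        return dst;
    }

    int main()
    {
        // One (min, max) pair per batch, mirroring the fixture's min_max
        // tensor of shape (2, batches).
        const std::vector<std::pair<float, float>> min_max = { { -0.5f, 0.5f }, { 0.0f, 1.0f } };
        const std::vector<std::vector<uint8_t>>    batches = { { 0, 128, 255 }, { 0, 128, 255 } };

        for(size_t b = 0; b < batches.size(); ++b)
        {
            const std::vector<float> out = dequantize_batch(batches[b], min_max[b].first, min_max[b].second);
            for(float v : out)
            {
                std::cout << v << ' ';
            }
            std::cout << '\n';
        }
        return 0;
    }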