Diffstat (limited to 'tests')
-rw-r--r--  tests/datasets/ShapeDatasets.h                            60
-rw-r--r--  tests/validation/CPP/DequantizationLayer.cpp              24
-rw-r--r--  tests/validation/CPP/DequantizationLayer.h                 2
-rw-r--r--  tests/validation/CPP/QuantizationLayer.cpp                49
-rw-r--r--  tests/validation/NEON/DequantizationLayer.cpp             39
-rw-r--r--  tests/validation/NEON/QuantizationLayer.cpp               13
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h   106
7 files changed, 229 insertions, 64 deletions
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 4c449a702f..806fc04c0d 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -63,6 +63,36 @@ public:
}
};
+/** Data set containing small 3D tensor shapes. */
+class Small3DShapes final : public ShapeDataset
+{
+public:
+ Small3DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 7U, 7U, 5U },
+ TensorShape{ 27U, 13U, 37U },
+ TensorShape{ 128U, 64U, 21U }
+ })
+ {
+ }
+};
+
+/** Data set containing small 4D tensor shapes. */
+class Small4DShapes final : public ShapeDataset
+{
+public:
+ Small4DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 7U, 7U, 5U, 3U },
+ TensorShape{ 27U, 13U, 37U, 2U },
+ TensorShape{ 128U, 64U, 21U, 3U }
+ })
+ {
+ }
+};
+
/** Data set containing small tensor shapes. */
class SmallShapes final : public ShapeDataset
{
@@ -117,6 +147,36 @@ public:
}
};
+/** Data set containing large 3D tensor shapes. */
+class Large3DShapes final : public ShapeDataset
+{
+public:
+ Large3DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 320U, 240U, 3U },
+ TensorShape{ 383U, 653U, 2U },
+ TensorShape{ 721U, 123U, 13U }
+ })
+ {
+ }
+};
+
+/** Data set containing large 4D tensor shapes. */
+class Large4DShapes final : public ShapeDataset
+{
+public:
+ Large4DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 320U, 123U, 3U, 3U },
+ TensorShape{ 383U, 413U, 2U, 3U },
+ TensorShape{ 517U, 123U, 13U, 2U }
+ })
+ {
+ }
+};
+
/** Data set containing small tensor shapes for direct convolution. */
class SmallDirectConvolutionShapes final : public ShapeDataset
{
diff --git a/tests/validation/CPP/DequantizationLayer.cpp b/tests/validation/CPP/DequantizationLayer.cpp
index 1c7ec25255..33096a1d81 100644
--- a/tests/validation/CPP/DequantizationLayer.cpp
+++ b/tests/validation/CPP/DequantizationLayer.cpp
@@ -32,23 +32,35 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, float min, float max)
+SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, const SimpleTensor<float> &min_max)
{
// Create reference
SimpleTensor<float> dst{ src.shape(), DataType::F32 };
- const float range = max - min;
- const float scaling = range / 255.0f;
+ // Compute reference
+ const int width = src.shape().x();
+ const int height = src.shape().y();
+ const int depth = src.shape().z();
+ const int stride_w = width * height * depth;
+ const int num_batches = min_max.shape().total_size_upper(1);
- for(int i = 0; i < src.num_elements(); ++i)
+ for(int k = 0; k < num_batches; ++k)
{
- dst[i] = (static_cast<float>(src[i]) * scaling) + min;
+ const float min = min_max[k * 2 + 0];
+ const float max = min_max[k * 2 + 1];
+ const float range = max - min;
+ const float scaling = range / 255.0f;
+
+ for(int i = 0; i < stride_w; ++i)
+ {
+ dst[i + k * stride_w] = (static_cast<float>(src[i + k * stride_w]) * scaling) + min;
+ }
}
return dst;
}
-template SimpleTensor<float> dequantization_layer(const SimpleTensor<uint8_t> &src, float min, float max);
+template SimpleTensor<float> dequantization_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<float> &min_max);
} // namespace reference
} // namespace validation
} // namespace test
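
For readers following the change, a minimal standalone sketch of the per-batch mapping the new reference implements (plain C++ with illustrative names, not the library's API): each batch carries its own {min, max} pair, and U8 values are scaled back into [min, max].

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the reference above: min_max packs one
// {min, max} pair per batch, and batch_size is width * height * depth.
std::vector<float> dequantize_batched(const std::vector<uint8_t> &src,
                                      const std::vector<float>   &min_max,
                                      size_t                      batch_size)
{
    std::vector<float> dst(src.size());
    const size_t num_batches = src.size() / batch_size;
    for(size_t k = 0; k < num_batches; ++k)
    {
        const float min     = min_max[2 * k + 0];
        const float max     = min_max[2 * k + 1];
        const float scaling = (max - min) / 255.0f;
        for(size_t i = 0; i < batch_size; ++i)
        {
            dst[k * batch_size + i] = static_cast<float>(src[k * batch_size + i]) * scaling + min;
        }
    }
    return dst;
}
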
diff --git a/tests/validation/CPP/DequantizationLayer.h b/tests/validation/CPP/DequantizationLayer.h
index 3aae338116..1a8adcf9d8 100644
--- a/tests/validation/CPP/DequantizationLayer.h
+++ b/tests/validation/CPP/DequantizationLayer.h
@@ -36,7 +36,7 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, float min, float max);
+SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, const SimpleTensor<float> &min_max);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/CPP/QuantizationLayer.cpp b/tests/validation/CPP/QuantizationLayer.cpp
index d61e75a3a9..0584d88a37 100644
--- a/tests/validation/CPP/QuantizationLayer.cpp
+++ b/tests/validation/CPP/QuantizationLayer.cpp
@@ -60,19 +60,48 @@ SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src)
// Create reference
SimpleTensor<uint8_t> dst{ src.shape(), DataType::U8 };
- // Compute min and max of the tensor using Min-Max layer
- float min = 0.f;
- float max = 0.f;
+ const int width = src.shape().x();
+ const int height = src.shape().y();
+ const int depth = src.shape().z();
+ const int stride_w = width * height * depth;
+ const int num_batches = src.shape().total_size_upper(3);
- compute_min_max(src, &min, &max);
+ for(int k = 0; k < num_batches; ++k)
+ {
+ // Compute min and max of the 3D tensor
+ float min = src[k * stride_w];
+ float max = src[k * stride_w];
- const float range = max - min;
+ // Look for min and max values
+ for(int i = 1; i < stride_w; ++i)
+ {
+ float val = src[i + k * stride_w];
+ if(val < min)
+ {
+ min = val;
+ }
+ if(val > max)
+ {
+ max = val;
+ }
+ }
- for(int i = 0; i < src.num_elements(); ++i)
- {
- // map values to range [0.0, 1.0]
- const float normalized = (src[i] - min) / range;
- dst[i] = static_cast<uint8_t>(std::min(255.0f, normalized * 256.0f));
+ // Saturate the result in case min = max
+ if(min == max)
+ {
+ min = 0.0f;
+ max = 1.0f;
+ }
+
+ const float range = max - min;
+
+ for(int i = 0; i < stride_w; ++i)
+ {
+ // map values to range [0.0, 1.0]
+ float val = src[i + k * stride_w];
+ const float normalized = (val - min) / range;
+ dst[i + k * stride_w] = static_cast<uint8_t>(std::min(255.0f, normalized * 256.0f));
+ }
}
return dst;
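
A minimal standalone sketch of the per-batch quantization the reference above performs (illustrative names, not the library's API): min and max are found within each batch, a constant batch is saturated to the [0, 1] range, and values are mapped to U8.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the reference above: one U8 mapping per batch,
// where batch_size is width * height * depth of the 3D slice.
std::vector<uint8_t> quantize_batched(const std::vector<float> &src, size_t batch_size)
{
    std::vector<uint8_t> dst(src.size());
    const size_t num_batches = src.size() / batch_size;
    for(size_t k = 0; k < num_batches; ++k)
    {
        const float *batch = src.data() + k * batch_size;
        float min = *std::min_element(batch, batch + batch_size);
        float max = *std::max_element(batch, batch + batch_size);
        // Saturate the result in case min == max (constant batch)
        if(min == max)
        {
            min = 0.0f;
            max = 1.0f;
        }
        const float range = max - min;
        for(size_t i = 0; i < batch_size; ++i)
        {
            // Map values to [0.0, 1.0], then to U8 with saturation at 255
            const float normalized  = (batch[i] - min) / range;
            dst[k * batch_size + i] = static_cast<uint8_t>(std::min(255.0f, normalized * 256.0f));
        }
    }
    return dst;
}
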
diff --git a/tests/validation/NEON/DequantizationLayer.cpp b/tests/validation/NEON/DequantizationLayer.cpp
index 22d56ab5d8..9bdba7204f 100644
--- a/tests/validation/NEON/DequantizationLayer.cpp
+++ b/tests/validation/NEON/DequantizationLayer.cpp
@@ -44,35 +44,56 @@ namespace
{
/** Tolerance for float operations */
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);
+
+const auto DequantizationShapes = concat(concat(concat(datasets::Small3DShapes(),
+ datasets::Large3DShapes()),
+ datasets::Small4DShapes()),
+ datasets::Large4DShapes());
+
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(DequantizationLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("DataType", DataType::U8)), shape, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(DequantizationShapes, framework::dataset::make("DataType", DataType::U8)), shape, data_type)
{
+ TensorShape shape_min_max = shape;
+ shape_min_max.set(Window::DimX, 2);
+
+ // Remove Y and Z dimensions and keep the batches
+ shape_min_max.remove_dimension(1);
+ shape_min_max.remove_dimension(1);
+
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type);
- Tensor dst = create_tensor<Tensor>(shape, DataType::F32);
+ Tensor src = create_tensor<Tensor>(shape, data_type);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::F32);
+ Tensor min_max = create_tensor<Tensor>(shape_min_max, DataType::F32);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
- float min = 0.f;
- float max = 0.f;
NEDequantizationLayer dequant_layer;
- dequant_layer.configure(&src, &dst, &min, &max);
+ dequant_layer.configure(&src, &dst, &min_max);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape);
validate(src.info()->valid_region(), valid_region);
validate(dst.info()->valid_region(), valid_region);
+ // Validate valid region of min_max tensor
+ const ValidRegion valid_region_min_max = shape_to_valid_region(shape_min_max);
+ validate(min_max.info()->valid_region(), valid_region_min_max);
+
// Validate padding
const PaddingSize padding = PaddingCalculator(shape.x(), 8).required_padding();
validate(src.info()->padding(), padding);
validate(dst.info()->padding(), padding);
+
+ // Validate padding of min_max tensor
+ const PaddingSize padding_min_max = PaddingCalculator(shape_min_max.x(), 2).required_padding();
+ validate(min_max.info()->padding(), padding_min_max);
}
template <typename T>
@@ -80,12 +101,14 @@ using NEDequantizationLayerFixture = DequantizationValidationFixture<Tensor, Acc
TEST_SUITE(Integer)
TEST_SUITE(U8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::U8)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::U8)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 5c2fab4653..26657c4062 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -44,12 +44,17 @@ namespace
{
/** Tolerance for quantization */
constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1);
+
+const auto QuantizationShapes = concat(concat(concat(datasets::Small3DShapes(),
+ datasets::Large3DShapes()),
+ datasets::Small4DShapes()),
+ datasets::Large4DShapes());
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(QuantizationLayer)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("DataType", DataType::F32)), shape, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
{
// Create tensors
Tensor src = create_tensor<Tensor>(shape, data_type);
@@ -78,12 +83,14 @@ using NEQuantizationLayerFixture = QuantizationValidationFixture<Tensor, Accesso
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 7543eb2d2e..28d43cf754 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -49,11 +49,8 @@ public:
template <typename...>
void setup(TensorShape shape, DataType data_type)
{
- // Initialize random min and max values
- rand_min_max(&_min, &_max);
-
- _target = compute_target(shape, data_type, _min, _max);
- _reference = compute_reference(shape, data_type, _min, _max);
+ _target = compute_target(shape, data_type);
+ _reference = compute_reference(shape, data_type);
}
protected:
@@ -63,28 +60,80 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type, float min, float max)
+ template <typename U>
+ void fill_min_max(U &&tensor)
+ {
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+
+ Window window;
+
+ window.set(0, Window::Dimension(0, tensor.shape()[0], 2));
+
+ for(unsigned int d = 1; d < tensor.shape().num_dimensions(); ++d)
+ {
+ window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
+ }
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const float n1 = distribution(gen);
+ const float n2 = distribution(gen);
+
+ float min = 0.0f;
+ float max = 0.0f;
+
+ if(n1 < n2)
+ {
+ min = n1;
+ max = n2;
+ }
+ else
+ {
+ min = n2;
+ max = n1;
+ }
+
+ auto out_ptr = reinterpret_cast<float *>(tensor(id));
+ out_ptr[0] = min;
+ out_ptr[1] = max;
+ });
+ }
+
+ TensorType compute_target(const TensorShape &shape, DataType data_type)
{
+ TensorShape shape_min_max = shape;
+ shape_min_max.set(Window::DimX, 2);
+
+ // Remove Y and Z dimensions and keep the batches
+ shape_min_max.remove_dimension(1);
+ shape_min_max.remove_dimension(1);
+
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, DataType::F32);
+ TensorType src = create_tensor<TensorType>(shape, data_type);
+ TensorType dst = create_tensor<TensorType>(shape, DataType::F32);
+ TensorType min_max = create_tensor<TensorType>(shape_min_max, DataType::F32);
// Create and configure function
FunctionType dequantization_layer;
- dequantization_layer.configure(&src, &dst, &min, &max);
+ dequantization_layer.configure(&src, &dst, &min_max);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
+ min_max.allocator()->allocate();
ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
fill(AccessorType(src));
+ fill_min_max(AccessorType(min_max));
// Compute function
dequantization_layer.run();
@@ -92,43 +141,28 @@ protected:
return dst;
}
- SimpleTensor<float> compute_reference(const TensorShape &shape, DataType data_type, float min, float max)
+ SimpleTensor<float> compute_reference(const TensorShape &shape, DataType data_type)
{
+ TensorShape shape_min_max = shape;
+ shape_min_max.set(Window::DimX, 2);
+
+ // Remove Y and Z dimensions and keep the batches
+ shape_min_max.remove_dimension(1);
+ shape_min_max.remove_dimension(1);
+
// Create reference
- SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<float> min_max{ shape_min_max, DataType::F32 };
// Fill reference
fill(src);
+ fill_min_max(min_max);
- return reference::dequantization_layer<T>(src, min, max);
- }
-
- /** Generate random constant values to be used as min and max for dequantization.
- */
- void rand_min_max(float *min, float *max)
- {
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<float> distribution(-10000.0, 10000.0);
-
- const float n1 = distribution(gen);
- const float n2 = distribution(gen);
-
- if(n1 < n2)
- {
- *min = n1;
- *max = n2;
- }
- else
- {
- *min = n2;
- *max = n1;
- }
+ return reference::dequantization_layer<T>(src, min_max);
}
TensorType _target{};
SimpleTensor<float> _reference{};
- float _min = 0.f;
- float _max = 0.f;
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
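
The shape manipulation used by both the test case and the fixture above (set X to 2, then drop Y and Z) reduces an input of shape {W, H, D} to {2} and {W, H, D, N} to {2, N}: one {min, max} pair per batch. A minimal standalone sketch of that derivation, with illustrative names rather than the library's TensorShape API:

#include <cstddef>
#include <vector>

// Hypothetical helper: derive the min/max tensor shape from an input shape.
// X holds the {min, max} pair; Y and Z are removed; batch dimensions remain.
std::vector<size_t> min_max_shape(std::vector<size_t> shape)
{
    shape[0] = 2; // X dimension now holds {min, max}
    // Remove the Y and Z dimensions, keeping any batch dimensions after them
    for(int n = 0; n < 2 && shape.size() > 1; ++n)
    {
        shape.erase(shape.begin() + 1);
    }
    return shape;
}
// e.g. min_max_shape({ 128, 64, 21, 3 }) == { 2, 3 }  (one pair per batch)
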