author     Chunosov <N.Chunosov@yandex.ru>            2017-11-22 20:42:13 +0700
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:42:17 +0000
commit     5124be5d1caa70964d452cf9a8cc7c67df31fa9d (patch)
tree       77d74963e9c3f52050cbc264a692133395182e98 /tests/validation
parent     9873ea3f1ea238ba7abfb635807614517c52be4b (diff)
download   ComputeLibrary-5124be5d1caa70964d452cf9a8cc7c67df31fa9d.tar.gz
COMPMID-661: Convolution quantized (#32)
Change-Id: Id69df4ce98d1d89bdf9c9aa5c4d909659909b30f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110456
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp              55
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp          2
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp             7
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h   121
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h             2
5 files changed, 141 insertions, 46 deletions
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index a6e07248aa..56e10f01b3 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -45,7 +45,8 @@ namespace
{
RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
/** CNN data types */
@@ -55,6 +56,7 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F32,
DataType::QS8,
DataType::QS16,
+ DataType::QASYMM8,
});
} // namespace
@@ -67,17 +69,22 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Set fixed point position data type allowed
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
+ auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position);
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const QuantizationInfo src_quantization_info = src.info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+
// Create and configure function
CLConvolutionLayer conv;
conv.configure(&src, &weights, &bias, &dst, info);
@@ -93,6 +100,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(bias.info()->valid_region(), bias_valid_region);
validate(dst.info()->valid_region(), dst_valid_region);
+ // Validate QuantizationInfo
+ ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
+
// Validate padding
//TODO(COMPMID-415) Need to validate padding?
}
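
Note: the QuantizationInfo(2.f / 255.f, 127) passed to create_tensor above encodes the asymmetric affine mapping that QASYMM8 uses, real = scale * (q - offset). A minimal sketch of that mapping with the test's values; the helper names are illustrative, not arm_compute API:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Asymmetric 8-bit affine mapping: real = scale * (q - offset).
// With scale = 2/255 and offset = 127 the representable range is
// roughly [-1, 1]. quantize/dequantize are illustrative helpers.
static uint8_t quantize(float real, float scale, int offset)
{
    const int q = static_cast<int>(std::round(real / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q))); // saturate to U8
}

static float dequantize(uint8_t q, float scale, int offset)
{
    return scale * (static_cast<int>(q) - offset);
}

// quantize(0.f, 2.f / 255.f, 127) == 127; dequantize(255, 2.f / 255.f, 127) ~= 1.004f
```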
@@ -143,7 +154,7 @@ TEST_SUITE_END()
template <typename T>
using CLConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
-TEST_SUITE(Quantized)
+TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -153,7 +164,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -162,7 +173,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
TEST_SUITE_END()
@@ -175,7 +186,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -184,7 +195,31 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using CLConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()
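
The S32 bias and the zero tolerance_qasymm8 both follow from how a quantized convolution composes: u8 inputs and weights are multiplied into an int32 accumulator, the bias is added there, and only then is the total rescaled back to u8, so target and reference can agree bit-exactly on the small fill ranges the fixture uses. A hedged sketch of one output element; the exact rounding of the final rescale is an assumption, not the library's code:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// One output element of a QASYMM8 convolution. Products accumulate in
// int32, the S32 bias is added to the accumulator, and the total is
// requantized to u8 with the combined scale (in_scale * w_scale) / out_scale.
uint8_t conv_element(const std::vector<uint8_t> &in, const std::vector<uint8_t> &w,
                     int32_t bias, float in_scale, int in_offset,
                     float w_scale, int w_offset, float out_scale, int out_offset)
{
    int32_t acc = bias; // bias lives in the S32 accumulator, hence DataType::S32
    for(size_t i = 0; i < in.size(); ++i)
    {
        acc += (static_cast<int32_t>(in[i]) - in_offset) * (static_cast<int32_t>(w[i]) - w_offset);
    }
    const float requant = (in_scale * w_scale) / out_scale;
    const int   q       = static_cast<int>(std::round(acc * requant)) + out_offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}
```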
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 84e1bca6a5..4b747d64f7 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -232,4 +232,4 @@ TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
-} // namespace arm_compute
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index e53f5fd407..0d8c8774b8 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -99,6 +99,9 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const QuantizationInfo src_quantization_info = src.info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+
// Create and configure function.
CLFullyConnectedLayer fc;
fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
@@ -106,6 +109,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
// Validate valid region
const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
validate(dst.info()->valid_region(), dst_valid_region);
+
+ // Validate QuantizationInfo
+ ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
}
template <typename T>
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 859780812a..48b4665fe7 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -47,17 +47,24 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixedPointFixture : public framework::Fixture
+class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
+ using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+
+public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights,
+ DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, fractional_bits);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits);
+ _data_type = data_type;
+ _is_quantized = is_data_type_quantized_asymmetric(data_type);
+ _bias_data_type = _is_quantized ? DataType::S32 : data_type;
+ _fractional_bits = fractional_bits;
+ _quantization_info = quantization_info;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
}
protected:
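
The new TBias alias is what lets one generic fixture serve both families: a compile-time switch that picks int32_t bias samples when T is uint8_t and keeps T otherwise. Distilled from the hunk above:

```cpp
#include <cstdint>
#include <type_traits>

// Same trait as the fixture's TBias: quantized (uint8_t) convolutions keep
// their bias in 32-bit integers, every other tensor type keeps T itself.
template <typename T>
using TBias = typename std::conditional<
    std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;

static_assert(std::is_same<TBias<uint8_t>, int32_t>::value, "QASYMM8 bias is S32");
static_assert(std::is_same<TBias<float>, float>::value, "float bias stays float");
```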
@@ -66,6 +73,18 @@ protected:
{
switch(tensor.data_type())
{
+ case DataType::QASYMM8:
+ {
+ std::uniform_int_distribution<uint8_t> distribution(0, 3);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F16:
case DataType::F32:
{
@@ -79,7 +98,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- bool reshape_weights, DataType data_type, int fixed_point_position)
+ bool reshape_weights)
{
WeightsInfo weights_info(!reshape_weights, weights_shape.x(), weights_shape.y(), weights_shape[3]);
TensorShape reshaped_weights_shape(weights_shape);
@@ -90,15 +109,16 @@ protected:
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
bool is_optimised = false;
#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */
reshaped_weights_shape.collapse(3);
- if(bias_shape.total_size() > 0)
+ if(bias_shape.total_size() > 0 && !_is_quantized)
{
+ // Add bias to the weights reshaped matrix
reshaped_weights_shape.set(0, reshaped_weights_shape.x() + 1);
}
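
The `+ 1` above is the classic augmented-matrix trick: for non-quantized types the bias is folded into the reshaped weights as one extra row, turning y = W*x + b into a single matrix product against an input extended with a trailing 1. The quantized path skips it because its bias is S32 while the weights stay u8, so the two cannot share a matrix. A small illustration with a hypothetical helper (the fixture's linearise_weights appends along its own reshaped layout):

```cpp
#include <cstddef>
#include <vector>

// y = W*x + b expressed as y = [W | b] * [x ; 1]: the bias rides along as
// an extra row/column of the weights, multiplied by the appended 1.
std::vector<float> affine_as_matmul(const std::vector<std::vector<float>> &W_aug,
                                    std::vector<float> x)
{
    x.push_back(1.0f); // the appended 1 that multiplies the bias terms
    std::vector<float> y(W_aug.size(), 0.0f);
    for(std::size_t r = 0; r < W_aug.size(); ++r)
    {
        for(std::size_t c = 0; c < x.size(); ++c)
        {
            y[r] += W_aug[r][c] * x[c];
        }
    }
    return y;
}
```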
@@ -110,17 +130,17 @@ protected:
}
else
{
- const int interleave_width = 16 / data_size_from_type(data_type);
+ const int interleave_width = 16 / data_size_from_type(_data_type);
reshaped_weights_shape.set(0, reshaped_weights_shape.x() * interleave_width);
reshaped_weights_shape.set(1, static_cast<unsigned int>(std::ceil(reshaped_weights_shape.y() / static_cast<float>(interleave_width))));
}
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, data_type, 1, fixed_point_position);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info);
// Create and configure function
FunctionType conv;
@@ -150,20 +170,28 @@ protected:
const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
bool is_optimised = false;
#if defined(__arm__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && _data_type == DataType::F32;
#elif defined(__aarch64__)
- is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
+ is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && _data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */
TensorShape tmp_weights_shape(weights_shape);
- SimpleTensor<T> tmp_weights(tmp_weights_shape, data_type, 1, fixed_point_position);
- SimpleTensor<T> tmp_bias(bias_shape, data_type, 1, fixed_point_position);
+ SimpleTensor<T> tmp_weights(tmp_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
// Fill with original shape
fill(tmp_weights, 1);
- fill(tmp_bias, 2);
- tmp_weights = linearise_weights(tmp_weights, &tmp_bias);
+ if(_is_quantized)
+ {
+ fill(AccessorType(bias), 2);
+ tmp_weights = linearise_weights(tmp_weights);
+ }
+ else
+ {
+ SimpleTensor<T> tmp_bias(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
+ fill(tmp_bias, 2);
+ tmp_weights = linearise_weights(tmp_weights, &tmp_bias);
+ }
if(!is_fully_connected_convolution && !is_optimised)
{
@@ -192,13 +220,12 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
{
// Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
- SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
// Fill reference
fill(src, 0);
@@ -208,10 +235,13 @@ protected:
return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
}
- TensorType _target{};
- SimpleTensor<T> _reference{};
- int _fractional_bits{};
- DataType _data_type{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ DataType _data_type{};
+ DataType _bias_data_type{};
+ int _fractional_bits{};
+ QuantizationInfo _quantization_info{};
+ bool _is_quantized = false;
private:
template <typename U>
@@ -241,7 +271,6 @@ private:
dst[dst_idx] = weights[weights_idx];
}
-
if(biases != nullptr)
{
// Fill last row with biases
@@ -260,13 +289,37 @@ private:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixture : public ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type)
{
- ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0);
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0, QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ConvolutionValidationFixedPointFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type, int fractional_bits)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, fractional_bits,
+ QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type,
+ QuantizationInfo quantization_info)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0, quantization_info);
}
};
} // namespace validation
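
The resulting hierarchy is one generic core that takes every knob, plus three thin adapters that pin the knob that does not apply to their data-type family. The shape, distilled with hypothetical minimal types (not the fixture's real signatures):

```cpp
// Generic core takes both the fixed-point and the quantization knob;
// each adapter fixes the meaningless one to a neutral default.
struct QuantizationInfoLite { float scale = 0.f; int offset = 0; };

struct GenericFixture
{
    void setup(int fractional_bits, QuantizationInfoLite qinfo)
    {
        (void)fractional_bits; (void)qinfo; // run the shared test body here
    }
};

struct FloatFixture : GenericFixture
{
    void setup() { GenericFixture::setup(0, {}); }                    // neither knob applies
};
struct FixedPointFixture : GenericFixture
{
    void setup(int fractional_bits) { GenericFixture::setup(fractional_bits, {}); }
};
struct QuantizedFixture : GenericFixture
{
    void setup(QuantizationInfoLite qinfo) { GenericFixture::setup(0, qinfo); }
};
```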
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c504e2e10d..06d6be3fa4 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -302,4 +302,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
\ No newline at end of file