| author | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-03-06 18:12:09 +0000 |
|---|---|---|
| committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-03-12 12:12:30 +0000 |
| commit | c7b183ab741650653289f8ce3bdeb4926521fdbd (patch) | |
| tree | 991e9f20340c91c288d52d8f9a64a3729e4a40b0 /tests | |
| parent | 6800117df3be825f0ec5c6cc71c4377322f51b99 (diff) | |
| download | ComputeLibrary-c7b183ab741650653289f8ce3bdeb4926521fdbd.tar.gz | |
COMPMID-3160: Add Bfloat16 support in NEGEMMConvolutionLayer
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I0e449306c138a562ffc1455e76ec44b2fd059d85
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2860
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | tests/validation/Helpers.cpp | 12 |
| -rw-r--r-- | tests/validation/Helpers.h | 2 |
| -rw-r--r-- | tests/validation/NEON/ConvolutionLayer.cpp | 16 |
| -rw-r--r-- | tests/validation/NEON/DepthConvertLayer.cpp | 14 |
| -rw-r--r-- | tests/validation/fixtures/ConvolutionLayerFixture.h | 39 |

5 files changed, 48 insertions(+), 35 deletions(-)
```diff
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 4da9742c2a..093271244e 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -212,18 +212,6 @@ SimpleTensor<float> convert_from_symmetric(const SimpleTensor<int16_t> &src)
     return dst;
 }
 
-SimpleTensor<float> convert_from_bfloat16(const SimpleTensor<int16_t> &src)
-{
-    SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
-    return dst;
-}
-
-SimpleTensor<int16_t> convert_to_bfloat(const SimpleTensor<float> &src)
-{
-    SimpleTensor<int16_t> dst{ src.shape(), DataType::BFLOAT16, 1, QuantizationInfo(), src.data_layout() };
-    return dst;
-}
-
 template <typename T>
 void matrix_multiply(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &out)
 {
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 942b2396bf..9c8897394a 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index fbc5a830a9..b7dee301ca 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -382,6 +382,20 @@ template <typename T>
 using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;
 
 TEST_SUITE(Float)
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
+TEST_SUITE(BFLOAT16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+                                                                                                                   framework::dataset::make("ReshapeWeights", { true })),
+                                                                                                                   framework::dataset::make("DataType", DataType::BFLOAT16)),
+                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                                                                                                   ActivationFunctionsDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
+}
+TEST_SUITE_END() // BFLOAT16
+#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
+
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index 163f539659..7af467be28 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -353,13 +353,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<bfloat16>, fram
     // Validate output
     validate(Accessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF32Fixture<bfloat16>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerBF16toF32Dataset),
-                                                                                                             framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                             DepthConvertLayerZeroShiftDataset))
-{
-    // Validate output
-    validate(Accessor(_target), _reference);
-}
 TEST_SUITE_END() // BFLOAT16_to_F32
 
 TEST_SUITE(F32_to_BFLOAT16)
@@ -370,13 +363,6 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToBF16Fixture<float>, framew
     // Validate output
     validate(Accessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToBF16Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerF32toBF16Dataset),
-                                                                                                           framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                           DepthConvertLayerZeroShiftDataset))
-{
-    // Validate output
-    validate(Accessor(_target), _reference);
-}
 TEST_SUITE_END() // F32_to_BFLOAT16
 #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 3c4b625ac6..b4abebe18d 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,7 +64,9 @@ public:
         _data_type                = data_type;
         _weights_data_type        = weights_data_type;
         _is_quantized             = is_data_type_quantized_asymmetric(data_type);
-        _bias_data_type           = _is_quantized ? DataType::S32 : data_type;
+        _is_bfloat16              = data_type == DataType::BFLOAT16;
+        _bias_data_type           = _is_quantized ? DataType::S32 : (_is_bfloat16 ? DataType::F32 : data_type);
+        _output_data_type         = _is_bfloat16 ? DataType::F32 : data_type;
         _quantization_info        = quantization_info;
         _weight_quantization_info = weight_quantization_info;
         _data_layout              = data_layout;
@@ -74,6 +76,15 @@ public:
     }
 
 protected:
+    void regularize_values(void *values, size_t size)
+    {
+        float *fvalues = static_cast<float *>(values);
+        for(size_t i = 0; i < size; ++i)
+        {
+            fvalues[i] = float(bfloat16(fvalues[i]));
+        }
+    }
+
     template <typename U>
     void fill(U &&tensor, int i)
     {
@@ -119,6 +130,7 @@ protected:
                 library->fill(tensor, distribution, i);
                 break;
             }
+            case DataType::BFLOAT16:
             case DataType::F16:
             case DataType::F32:
             {
@@ -155,7 +167,7 @@ protected:
         TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
         TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
         TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
-        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, _output_data_type, 1, _quantization_info, _data_layout);
 
         // Create and configure function
         FunctionType conv;
@@ -195,16 +207,27 @@ protected:
 
         const unsigned int num_groups = input_shape[2] / weights_shape[2];
 
+        // Setup reference data types
+        const DataType src_dt     = _is_bfloat16 ? DataType::F32 : _data_type;
+        const DataType weights_dt = _is_bfloat16 ? DataType::F32 : _weights_data_type;
+        const DataType bias_dt    = _is_bfloat16 ? DataType::F32 : _bias_data_type;
+
         // Create reference
-        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
-        SimpleTensor<TW>    weights{ weights_shape, _weights_data_type, 1, _weight_quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
+        SimpleTensor<T>     src{ input_shape, src_dt, 1, _quantization_info };
+        SimpleTensor<TW>    weights{ weights_shape, weights_dt, 1, _weight_quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, bias_dt, 1, _quantization_info };
 
-        // Fill reference
         fill(src, 0);
         fill(weights, 1);
         fill(bias, 2);
 
+        // Fill with bfloat16 to perform the conversion and reduce the mismatches in the output
+        if(_is_bfloat16)
+        {
+            regularize_values(static_cast<void *>(src.data()), src.num_elements());
+            regularize_values(static_cast<void *>(weights.data()), weights.num_elements());
+        }
+
         return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups), act_info) :
                reference::convolution_layer<T>(src, weights, bias, output_shape, info, dilation, num_groups);
@@ -215,10 +238,12 @@ protected:
     DataType         _data_type{};
     DataType         _weights_data_type{};
    DataType         _bias_data_type{};
+    DataType         _output_data_type{};
     DataLayout       _data_layout{};
     QuantizationInfo _quantization_info{};
     QuantizationInfo _weight_quantization_info{};
     bool             _is_quantized = false;
+    bool             _is_bfloat16  = false;
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
```
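The core trick in the fixture change above is that the F32 reference inputs are first "regularized" through bfloat16, so the reference convolution starts from exactly the values the BF16 kernel will see and the output mismatch stays within the existing F32 tolerances. A minimal standalone sketch of that round-trip is below; it does not use the library's `bfloat16` class, and `round_to_bfloat16` is a hypothetical helper that assumes plain round-to-nearest-even on the top 16 bits (NaN/Inf handling is ignored), so the library's actual conversion may differ in edge cases.

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Round a 32-bit float to bfloat16 precision and return it as a float again.
// This mirrors the intent of regularize_values() in the fixture: after the
// round-trip, the low 16 mantissa bits are gone, so an F32 reference and a
// BF16 kernel operate on identical input values.
// Assumption: round-to-nearest-even; NaN/Inf are not treated specially.
static float round_to_bfloat16(float value)
{
    std::uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));      // type-pun safely
    const std::uint32_t lsb           = (bits >> 16) & 1u;
    const std::uint32_t rounding_bias = 0x7FFFu + lsb;
    bits = (bits + rounding_bias) & 0xFFFF0000u;   // round, then truncate low half
    std::memcpy(&value, &bits, sizeof(value));
    return value;
}

int main()
{
    // Regularize a small buffer in place, as the fixture does for src/weights.
    float values[] = { 0.1f, 1.0f, 3.14159265f, 123456.789f };
    for(float &v : values)
    {
        const float regularized = round_to_bfloat16(v);
        std::printf("%.9g -> %.9g\n", v, regularized);
        v = regularized;
    }
    return 0;
}
```

With inputs regularized this way, the new BFLOAT16 test suite can validate the NEON BF16 convolution against the plain F32 reference using the same `rel_tolerance_f32`/`abs_tolerance_f32` bounds as the F32 tests.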