diff options
author | Pablo Tello <pablo.tello@arm.com> | 2017-07-11 16:31:35 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-09-17 14:16:42 +0100 |
commit | b49a7153c901b5c523a3d07815b79a4f460533b1 (patch) | |
tree | 5f6c58964365be1f49b444c794de071d6ca09417 /tests/validation | |
parent | f7629cf5637c9939877fc0f53a4ad56587069172 (diff) | |
download | ComputeLibrary-b49a7153c901b5c523a3d07815b79a4f460533b1.tar.gz |
COMPMID-421: Added FP16 support to Softmax.
Change-Id: If48178689e7cdadf1858556438c7292128be5b92
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80436
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 2
-rw-r--r-- | tests/validation/NEON/ConvolutionLayerDirect.cpp | 4
-rw-r--r-- | tests/validation/NEON/PoolingLayer.cpp | 2
-rw-r--r-- | tests/validation/NEON/SoftmaxLayer.cpp | 26
4 files changed, 27 insertions, 7 deletions
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index fbb5d17540..5f1a2c6fb6 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -238,7 +238,7 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info); // Validate output - validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(dt, act_function)); + validate(Accessor(dst), ref_dst, activation_layer_tolerance(dt, act_function)); } BOOST_AUTO_TEST_SUITE_END() #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/ConvolutionLayerDirect.cpp b/tests/validation/NEON/ConvolutionLayerDirect.cpp index 034a8b2045..effb898428 100644 --- a/tests/validation/NEON/ConvolutionLayerDirect.cpp +++ b/tests/validation/NEON/ConvolutionLayerDirect.cpp @@ -150,7 +150,7 @@ BOOST_DATA_TEST_CASE(W1x1, RawTensor ref = Reference::compute_reference_convolution_layer(input_shape, w_shape, b_shape, d_shape, dt, conv_info, 0); // Validate output - validate(NEAccessor(dst), ref); + validate(Accessor(dst), ref); } BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit")) @@ -172,7 +172,7 @@ BOOST_DATA_TEST_CASE(W3x3, DirectConvolutionShapes() * boost::unit_test::data::m RawTensor ref = Reference::compute_reference_convolution_layer(input_shape, w_shape, b_shape, d_shape, dt, conv_info, 0); // Validate output - validate(NEAccessor(dst), ref, tolerance_fp16); + validate(Accessor(dst), ref, tolerance_fp16); } BOOST_AUTO_TEST_SUITE_END() #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index 3961770310..0d2f285dff 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -162,7 +162,7 @@ BOOST_DATA_TEST_CASE(RandomDataset, RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info); // Validate output - validate(NEAccessor(dst), ref_dst, tolerance_f16, 0); + validate(Accessor(dst), ref_dst, tolerance_f16, 0); } BOOST_AUTO_TEST_SUITE_END() #endif /* ARM_COMPUTE_ENABLE_FP16 */
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index 92ca673f17..8422ba363c 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -49,7 +49,10 @@ using namespace arm_compute::test::validation; namespace { /** Tolerance for float operations */ -const float tolerance = 0.000001f; +const float tolerance_f32 = 0.000001f; +#ifdef ARM_COMPUTE_ENABLE_FP16 +const float tolerance_f16 = 0.0001f; +#endif /* ARM_COMPUTE_ENABLE_FP16*/ /** Tolerance for fixed point operations */ const float tolerance_fixed_point = 2.f; @@ -102,6 +105,23 @@ Tensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_po BOOST_AUTO_TEST_SUITE(NEON) BOOST_AUTO_TEST_SUITE(SoftmaxLayer) +#ifdef ARM_COMPUTE_ENABLE_FP16 +BOOST_AUTO_TEST_SUITE(Float16) +BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit")) +BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape) +{ + // Compute function + Tensor dst = compute_softmax_layer(shape, DataType::F16); + + // Compute reference + RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::F16); + + // Validate output + validate(Accessor(dst), ref_dst, tolerance_f16); +} +BOOST_AUTO_TEST_SUITE_END() +#endif /* ARM_COMPUTE_ENABLE_FP16*/ + BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly")) BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt) { @@ -142,7 +162,7 @@ BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt) RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt); // Validate output - validate(Accessor(dst), ref_dst, tolerance); + validate(Accessor(dst), ref_dst, tolerance_f32); } BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly")) @@ -155,7 +175,7 @@ BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt) RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt); // Validate output - validate(Accessor(dst), ref_dst, tolerance); + validate(Accessor(dst), ref_dst, tolerance_f32); } BOOST_AUTO_TEST_SUITE_END()