From 7485d5a62685cb745ab50e970adb722cb71557ac Mon Sep 17 00:00:00 2001 From: Vidhya Sudhan Loganathan Date: Wed, 4 Jul 2018 09:34:00 +0100 Subject: COMPMID-970 : Remove QS8 / QS16 support Removed fixed point related code. Change-Id: I487acf138dace3b0450e0d72ca7071eaec254566 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137678 Tested-by: Jenkins Reviewed-by: Anthony Barbier --- tests/validation/CL/ActivationLayer.cpp | 27 +- tests/validation/CL/ArithmeticDivision.cpp | 6 +- tests/validation/CL/ConvolutionLayer.cpp | 48 +- tests/validation/CL/DeconvolutionLayer.cpp | 48 +- tests/validation/CL/DepthwiseConvolutionLayer.cpp | 160 ++-- tests/validation/CL/DilatedConvolutionLayer.cpp | 30 +- tests/validation/CL/DirectConvolutionLayer.cpp | 99 +- tests/validation/CL/LSTMLayer.cpp | 128 +-- tests/validation/CL/LocallyConnected.cpp | 64 +- tests/validation/CL/NormalizationLayer.cpp | 24 +- tests/validation/CL/PoolingLayer.cpp | 32 +- tests/validation/CL/RNNLayer.cpp | 84 +- tests/validation/CL/WidthConcatenateLayer.cpp | 24 +- tests/validation/FixedPoint.h | 997 --------------------- tests/validation/GLES_COMPUTE/ActivationLayer.cpp | 27 +- tests/validation/GLES_COMPUTE/PoolingLayer.cpp | 16 +- tests/validation/Helpers.h | 6 +- tests/validation/NEON/ConvolutionLayer.cpp | 24 +- tests/validation/NEON/DeconvolutionLayer.cpp | 48 +- tests/validation/NEON/DilatedConvolutionLayer.cpp | 24 +- tests/validation/NEON/DirectConvolutionLayer.cpp | 72 +- tests/validation/NEON/GEMMLowp.cpp | 2 +- tests/validation/NEON/LocallyConnected.cpp | 64 +- tests/validation/NEON/NormalizationLayer.cpp | 24 +- tests/validation/NEON/PoolingLayer.cpp | 28 +- tests/validation/NEON/RNNLayer.cpp | 96 +- tests/validation/NEON/Scale.cpp | 20 +- tests/validation/Validation.h | 1 - tests/validation/fixtures/PoolingLayerFixture.h | 8 +- tests/validation/fixtures/SoftmaxLayerFixture.h | 22 +- .../validation/reference/ArithmeticSubtraction.cpp | 3 +- .../reference/BatchNormalizationLayer.cpp | 1 - tests/validation/reference/ChannelCombine.cpp | 1 - tests/validation/reference/ChannelExtract.cpp | 1 - tests/validation/reference/ColorConvert.cpp | 1 - tests/validation/reference/Convolution3d.h | 71 +- tests/validation/reference/ConvolutionLayer.cpp | 1 - tests/validation/reference/DeconvolutionLayer.cpp | 1 - .../validation/reference/DepthConcatenateLayer.cpp | 1 - tests/validation/reference/DepthConvertLayer.cpp | 28 - .../reference/DepthwiseConvolutionLayer.cpp | 1 - tests/validation/reference/FixedPoint.cpp | 83 -- tests/validation/reference/FixedPoint.h | 44 - tests/validation/reference/FlattenLayer.cpp | 2 - tests/validation/reference/FullyConnectedLayer.cpp | 1 - tests/validation/reference/GEMM.cpp | 70 -- tests/validation/reference/GEMM.h | 4 +- tests/validation/reference/GEMMInterleave4x4.h | 3 +- tests/validation/reference/GEMMInterleaveBlocked.h | 3 +- tests/validation/reference/GEMMTranspose1xW.h | 3 +- tests/validation/reference/NormalizationLayer.cpp | 120 --- tests/validation/reference/NormalizationLayer.h | 4 +- .../reference/PixelWiseMultiplication.cpp | 2 - tests/validation/reference/PoolingLayer.cpp | 3 +- tests/validation/reference/SoftmaxLayer.cpp | 53 +- tests/validation/reference/SoftmaxLayer.h | 4 +- tests/validation/reference/Transpose.cpp | 3 +- .../validation/reference/WidthConcatenateLayer.cpp | 1 - 58 files changed, 609 insertions(+), 2157 deletions(-) delete mode 100644 tests/validation/FixedPoint.h delete mode 100644 tests/validation/reference/FixedPoint.cpp delete mode 100644 
tests/validation/reference/FixedPoint.h (limited to 'tests/validation') diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp index 4f97d7b6c1..45b23edd27 100644 --- a/tests/validation/CL/ActivationLayer.cpp +++ b/tests/validation/CL/ActivationLayer.cpp @@ -61,35 +61,14 @@ AbsoluteTolerance tolerance(ActivationLayerInfo::ActivationFunction activ case ActivationLayerInfo::ActivationFunction::SQUARE: return AbsoluteTolerance(data_type == DataType::F16 ? 0.1f : epsilon); case ActivationLayerInfo::ActivationFunction::LOGISTIC: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : epsilon); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : epsilon); case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: return AbsoluteTolerance(data_type == DataType::F16 ? 0.00001f : epsilon); case ActivationLayerInfo::ActivationFunction::SOFT_RELU: case ActivationLayerInfo::ActivationFunction::SQRT: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.01f : 0.00001f); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 0.01f : 0.00001f); case ActivationLayerInfo::ActivationFunction::TANH: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : 0.00001f); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : 0.00001f); default: return AbsoluteTolerance(epsilon); } diff --git a/tests/validation/CL/ArithmeticDivision.cpp b/tests/validation/CL/ArithmeticDivision.cpp index 42e2d223c2..5d4fa1fd5e 100644 --- a/tests/validation/CL/ArithmeticDivision.cpp +++ b/tests/validation/CL/ArithmeticDivision.cpp @@ -57,19 +57,19 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), }), framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), })), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 2), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), })), framework::dataset::make("Expected", { false, false, false, false, true })), input1_info, input2_info, output_info, expected) diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index 30dd8502ca..4ea2eb81a5 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -71,32 +71,32 @@ TEST_SUITE(CL) TEST_SUITE(ConvolutionLayer) 
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), + TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32) }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0), - TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0) + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16), + TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32) })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1), PadStrideInfo(1, 2, 1, 1), diff --git a/tests/validation/CL/DeconvolutionLayer.cpp 
b/tests/validation/CL/DeconvolutionLayer.cpp index 269bf1587b..0fd7ed4ddc 100644 --- a/tests/validation/CL/DeconvolutionLayer.cpp +++ b/tests/validation/CL/DeconvolutionLayer.cpp @@ -103,33 +103,33 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights shape - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 4), // Non supported data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape - TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights shape + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid bias shape + TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8, 5), - TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), - TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8), + TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32), })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16, 0), - TensorInfo(TensorShape(1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U), 1, DataType::F32, 5), - TensorInfo(TensorShape(25U, 11U), 1, DataType::F32, 11), - TensorInfo(TensorShape(1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 5), - TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(25U, 10U, 
2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32), })), framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp index 5b18f5953b..fad8140848 100644 --- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp +++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp @@ -56,57 +56,57 @@ TEST_SUITE(DepthwiseConvolutionLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate3x3, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(32U, 18U, 3U), 1, DataType::F32, 0), // Mismatching input feature maps - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Unsupported weights dimensions - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::QASYMM8, 0), // Unsupported activation - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Mismatching depth multiplier - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Invalid stride - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Invalid biases size - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), // Invalid output size - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 18U, 8U), 1, DataType::F32, 0), - TensorInfo(TensorShape(50U, 32U, 8U), 1, DataType::QASYMM8, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(32U, 18U, 3U), 1, DataType::F32), // Mismatching input feature maps + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Unsupported weights dimensions + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::QASYMM8), // Unsupported activation + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Mismatching depth multiplier + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Invalid stride + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Invalid biases size + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Invalid biases dimensions + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), // Invalid output size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 18U, 8U), 1, DataType::F32), + TensorInfo(TensorShape(50U, 32U, 8U), 1, DataType::QASYMM8), }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::QASYMM8, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 
0), - TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::QASYMM8), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8), })), - framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::S32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(24U), 1, DataType::S32, 0), + framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::S32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(2U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(24U), 1, DataType::S32), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::QASYMM8, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(48U, 30U, 24U), 1, DataType::QASYMM8, 0), + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::QASYMM8), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 18U, 2U), 1, DataType::F32), + 
TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(48U, 30U, 24U), 1, DataType::QASYMM8), })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), @@ -155,41 +155,41 @@ DATA_TEST_CASE(Validate3x3, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip } DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32, 0), // Mismatching input feature maps - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching depth multiplier - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size - TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32), // Mismatching input feature maps + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching depth multiplier + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size + TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8), }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 24U), 1, DataType::QASYMM8), })), - framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(24U), 1, DataType::S32, 0), + framework::dataset::make("BiasesInfo", { TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + 
TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(2U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(2U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(24U), 1, DataType::S32), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 11U, 24U), 1, DataType::QASYMM8, 0), + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 11U, 24U), 1, DataType::QASYMM8), })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/CL/DilatedConvolutionLayer.cpp b/tests/validation/CL/DilatedConvolutionLayer.cpp index fdd6cc812a..f748f905d1 100644 --- a/tests/validation/CL/DilatedConvolutionLayer.cpp +++ b/tests/validation/CL/DilatedConvolutionLayer.cpp @@ -61,23 +61,23 @@ TEST_SUITE(CL) TEST_SUITE(DilatedConvolutionLayer) DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(23U, 27U, 23U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(23U, 27U, 23U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), + TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32) }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 23U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0) + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 23U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0), - 
TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32) })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1), PadStrideInfo(1, 2, 1, 1), diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp index a796b6e4da..87f9449359 100644 --- a/tests/validation/CL/DirectConvolutionLayer.cpp +++ b/tests/validation/CL/DirectConvolutionLayer.cpp @@ -61,16 +61,7 @@ const auto data = combine(datasets::SmallDirectConvolutionShapes(), combine(framework::dataset::make("PadY", 0, 2), framework::dataset::make("KernelSize", { 3, 5 })))), framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); -const auto data_fixed_point = combine(datasets::TinyDirectConvolutionShapes(), - combine(framework::dataset::make("StrideX", 1, 3), - combine(framework::dataset::make("StrideY", 1, 3), - combine(concat(combine(framework::dataset::make("PadX", 0), - combine(framework::dataset::make("PadY", 0), - framework::dataset::make("KernelSize", 1))), - combine(framework::dataset::make("PadX", 0, 2), - combine(framework::dataset::make("PadY", 0, 2), - framework::dataset::make("KernelSize", { 3 })))), - framework::dataset::make("NumKernels", { 1, 4, 8, 16 }))))); + /** Activation function Dataset*/ const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", { @@ -89,53 +80,53 @@ TEST_SUITE(DirectConvolutionLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non-rectangular weights dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid stride - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride + TensorInfo(TensorShape(27U, 13U, 2U), 1, 
DataType::F32), // Invalid biases size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), }), - framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32), })), - framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(3U), 1, DataType::F32), + TensorInfo(TensorShape(4U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 
0), - TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32), })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/CL/LSTMLayer.cpp b/tests/validation/CL/LSTMLayer.cpp index bd43678844..e1d4cbec49 100644 --- a/tests/validation/CL/LSTMLayer.cpp +++ b/tests/validation/CL/LSTMLayer.cpp @@ -49,77 +49,77 @@ TEST_SUITE(LSTMLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 2U), 1, DataType::U8, 0), // Wrong data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong input weights size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong recurrent weights size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong cell bias size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong cell state size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong output size - TensorInfo(TensorShape(8U, 2U), 1, DataType::F32, 0), // Wrong scratch size + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 2U), 1, DataType::U8), // Wrong data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong input weights size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong recurrent weights size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong cell bias size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong cell state size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong output size + TensorInfo(TensorShape(8U, 2U), 1, DataType::F32), // Wrong scratch size }), - framework::dataset::make("InputWeightsInfo", { TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(8U, 16U), 1, DataType::F32, 0), + framework::dataset::make("InputWeightsInfo", { TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 
16U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(8U, 16U), 1, DataType::F32), })), - framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 16U), 1, DataType::F32, 0), + framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 16U), 1, DataType::F32), })), - framework::dataset::make("CellBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), + framework::dataset::make("CellBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(30U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), })), - framework::dataset::make("ProjectionBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U), 1, DataType::F32, 0), + framework::dataset::make("ProjectionBiasInfo", { TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), + TensorInfo(TensorShape(16U), 1, DataType::F32), })), - framework::dataset::make("CellStateInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), + framework::dataset::make("CellStateInfo", { 
TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 2U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 2U), 1, DataType::F32), })), - framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(64U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(12U, 2U), 1, DataType::F32, 0), + framework::dataset::make("ScratchInfo", { TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(64U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(12U, 2U), 1, DataType::F32), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), diff --git a/tests/validation/CL/LocallyConnected.cpp b/tests/validation/CL/LocallyConnected.cpp index d8f236cb12..5381072131 100644 --- a/tests/validation/CL/LocallyConnected.cpp +++ b/tests/validation/CL/LocallyConnected.cpp @@ -52,41 +52,41 @@ TEST_SUITE(LocallyConnected) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/bias - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/weights - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/bias - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // 
Mismatching shape input/output - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Asymmetric padding - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/bias + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/weights + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/bias + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/output + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Asymmetric padding + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32) }), - framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0) + framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32) })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F16, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 274U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0) + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F16), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 274U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32, 0), - 
TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32) })), framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp index a2dbaff272..e640e01079 100644 --- a/tests/validation/CL/NormalizationLayer.cpp +++ b/tests/validation/CL/NormalizationLayer.cpp @@ -71,19 +71,19 @@ TEST_SUITE(NormalizationLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/output - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Even normalization + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non implemented IN_MAP_2D + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), })), framework::dataset::make("NormInfo", { NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp index 0b8a11fe5d..133152219f 100644 --- a/tests/validation/CL/PoolingLayer.cpp +++ b/tests/validation/CL/PoolingLayer.cpp @@ -65,23 +65,23 @@ TEST_SUITE(PoolingLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("InputInfo", { 
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters - TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid parameters + TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32), // Non-rectangular Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), - TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), + TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), diff --git a/tests/validation/CL/RNNLayer.cpp b/tests/validation/CL/RNNLayer.cpp index 0af6f8ea00..9179c0955c 100644 --- a/tests/validation/CL/RNNLayer.cpp +++ b/tests/validation/CL/RNNLayer.cpp @@ -49,53 +49,53 @@ TEST_SUITE(RNNLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8, 0), // Wrong data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong weights size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong recurrent weights size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong bias size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong output size - TensorInfo(TensorShape(27U, 13U), 1, 
DataType::F32, 0), // Wrong hidden output size + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8), // Wrong data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong weights size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong recurrent weights size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong bias size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong output size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong hidden output size }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), })), - framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), + framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(30U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, 
DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), })), - framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32, 0), + framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp index 36a5e6fcfb..6af3c64f73 100644 --- a/tests/validation/CL/WidthConcatenateLayer.cpp +++ b/tests/validation/CL/WidthConcatenateLayer.cpp @@ -44,20 +44,20 @@ TEST_SUITE(WidthConcatenateLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching y dimension - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching total width - TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching y dimension + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching total width + TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32) }), - framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16, 0), - TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 
27U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16), + TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32) })), framework::dataset::make("Expected", { false, false, false, true })), input_info1, input_info2, output_info,expected) diff --git a/tests/validation/FixedPoint.h b/tests/validation/FixedPoint.h deleted file mode 100644 index 81c4f53724..0000000000 --- a/tests/validation/FixedPoint.h +++ /dev/null @@ -1,997 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef __ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__ -#define __ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__ - -#include "support/ToolchainSupport.h" -#include "tests/Utils.h" - -#include -#include -#include -#include -#include -#include - -namespace arm_compute -{ -namespace test -{ -namespace fixed_point_arithmetic -{ -namespace detail -{ -// Forward declare structs -struct functions; -template -struct constant_expr; -} - -/** Fixed point traits */ -namespace traits -{ -// Promote types -// *INDENT-OFF* -// clang-format off -/** Promote a type */ -template struct promote { }; -/** Promote uint8_t to uint16_t */ -template <> struct promote { using type = uint16_t; /**< Promoted type */ }; -/** Promote int8_t to int16_t */ -template <> struct promote { using type = int16_t; /**< Promoted type */ }; -/** Promote uint16_t to uint32_t */ -template <> struct promote { using type = uint32_t; /**< Promoted type */ }; -/** Promote int16_t to int32_t */ -template <> struct promote { using type = int32_t; /**< Promoted type */ }; -/** Promote uint32_t to uint64_t */ -template <> struct promote { using type = uint64_t; /**< Promoted type */ }; -/** Promote int32_t to int64_t */ -template <> struct promote { using type = int64_t; /**< Promoted type */ }; -/** Promote float to float */ -template <> struct promote { using type = float; /**< Promoted type */ }; -/** Promote half to half */ -template <> struct promote { using type = half; /**< Promoted type */ }; - -/** Get promoted type */ -template -using promote_t = typename promote::type; -// clang-format on -// *INDENT-ON* -} - -/** Strongly typed enum class representing the overflow policy */ -enum class OverflowPolicy -{ - WRAP, /**< Wrap policy */ - SATURATE /**< Saturate policy */ -}; -/** Strongly typed enum class representing the rounding policy */ -enum class RoundingPolicy -{ - TO_ZERO, /**< Round to zero policy */ - TO_NEAREST_EVEN /**< Round to nearest even policy */ -}; - -/** Arbitrary fixed-point arithmetic class */ -template -class fixed_point -{ -public: - // Static Checks - static_assert(std::is_integral::value, "Type is not an integer"); - - /** Constructor (from different fixed point type) - * - * @param[in] val Fixed point - * @param[in] p Fixed point precision - */ - template - fixed_point(fixed_point val, uint8_t p) - : _value(0), _fixed_point_position(p) - { - assert(p > 0 && p < std::numeric_limits::digits); - T v = 0; - - if(std::numeric_limits::digits < std::numeric_limits::digits) - { - val.rescale(p); - v = detail::constant_expr::saturate_cast(val.raw()); - } - else - { - auto v_cast = static_cast>(val); - v_cast.rescale(p); - v = v_cast.raw(); - } - _value = static_cast(v); - } - /** Constructor (from integer) - * - * @param[in] val Integer value to be represented as fixed point - * @param[in] p Fixed point precision - * @param[in] is_raw If true val is a raw fixed point value else an integer - */ - template ::value>::type> - fixed_point(U val, uint8_t p, bool is_raw = false) - : _value(val << p), _fixed_point_position(p) - { - if(is_raw) - { - _value = val; - } - } - /** Constructor (from float) - * - * @param[in] val Float value to be represented as fixed point - * @param[in] p Fixed point precision - */ - fixed_point(float val, uint8_t p) - : _value(detail::constant_expr::to_fixed(val, p)), _fixed_point_position(p) - { - assert(p > 0 && p < std::numeric_limits::digits); - } - /** Constructor (from float string) - * - * @param[in] str Float string to be represented as fixed point - * @param[in] p Fixed point 
precision - */ - fixed_point(std::string str, uint8_t p) - : _value(detail::constant_expr::to_fixed(support::cpp11::stof(str), p)), _fixed_point_position(p) - { - assert(p > 0 && p < std::numeric_limits::digits); - } - /** Default copy constructor */ - fixed_point &operator=(const fixed_point &) = default; - /** Default move constructor */ - fixed_point &operator=(fixed_point &&) = default; - /** Default copy assignment operator */ - fixed_point(const fixed_point &) = default; - /** Default move assignment operator */ - fixed_point(fixed_point &&) = default; - - /** Float conversion operator - * - * @return Float representation of fixed point - */ - operator float() const - { - return detail::constant_expr::to_float(_value, _fixed_point_position); - } - /** Integer conversion operator - * - * @return Integer representation of fixed point - */ - template ::value>::type> - operator U() const - { - return detail::constant_expr::to_int(_value, _fixed_point_position); - } - /** Convert to different fixed point of different type but same precision - * - * @note Down-conversion might fail. - */ - template - operator fixed_point() - { - U val = static_cast(_value); - if(std::numeric_limits::digits < std::numeric_limits::digits) - { - val = detail::constant_expr::saturate_cast(_value); - } - return fixed_point(val, _fixed_point_position, true); - } - - /** Arithmetic += assignment operator - * - * @param[in] rhs Fixed point operand - * - * @return Reference to this fixed point - */ - template - fixed_point &operator+=(const fixed_point &rhs) - { - fixed_point val(rhs, _fixed_point_position); - _value += val.raw(); - return *this; - } - /** Arithmetic -= assignment operator - * - * @param[in] rhs Fixed point operand - * - * @return Reference to this fixed point - */ - template - fixed_point &operator-=(const fixed_point &rhs) - { - fixed_point val(rhs, _fixed_point_position); - _value -= val.raw(); - return *this; - } - - /** Raw value accessor - * - * @return Raw fixed point value - */ - T raw() const - { - return _value; - } - /** Precision accessor - * - * @return Precision of fixed point - */ - uint8_t precision() const - { - return _fixed_point_position; - } - /** Rescale a fixed point to a new precision - * - * @param[in] p New fixed point precision - */ - void rescale(uint8_t p) - { - assert(p > 0 && p < std::numeric_limits::digits); - - using promoted_T = typename traits::promote::type; - promoted_T val = _value; - if(p > _fixed_point_position) - { - val <<= (p - _fixed_point_position); - } - else if(p < _fixed_point_position) - { - uint8_t pbar = _fixed_point_position - p; - val += (pbar != 0) ? (1 << (pbar - 1)) : 0; - val >>= pbar; - } - - _value = detail::constant_expr::saturate_cast(val); - _fixed_point_position = p; - } - -private: - T _value; /**< Fixed point raw value */ - uint8_t _fixed_point_position; /**< Fixed point precision */ -}; - -namespace detail -{ -/** Count the number of leading zero bits in the given value. - * - * @param[in] value Input value. - * - * @return Number of leading zero bits. - */ -template -constexpr int clz(T value) -{ - using unsigned_T = typename std::make_unsigned::type; - // __builtin_clz is available for int. Need to correct reported number to - // match the original type. 
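// [Annotation, not part of the original patch] A worked instance of the correction on
// the next line, assuming T = int8_t: the argument is promoted to a 32-bit int, so
// __builtin_clz(int8_t(16)) returns 27; subtracting
// (32 - std::numeric_limits<uint8_t>::digits) = 32 - 8 = 24 leaves 3, the leading-zero
// count of the 8-bit pattern 0b00010000.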
- return __builtin_clz(value) - (32 - std::numeric_limits::digits); -} - -/** Constant expressions */ -template -struct constant_expr -{ - /** Calculate representation of 1 in fixed point given a fixed point precision - * - * @param[in] p Fixed point precision - * - * @return Representation of value 1 in fixed point. - */ - static constexpr T fixed_one(uint8_t p) - { - return (1 << p); - } - /** Calculate fixed point precision step given a fixed point precision - * - * @param[in] p Fixed point precision - * - * @return Fixed point precision step - */ - static constexpr float fixed_step(uint8_t p) - { - return (1.0f / static_cast(1 << p)); - } - - /** Convert a fixed point value to float given its precision. - * - * @param[in] val Fixed point value - * @param[in] p Fixed point precision - * - * @return Float representation of the fixed point number - */ - static constexpr float to_float(T val, uint8_t p) - { - return static_cast(val * fixed_step(p)); - } - /** Convert a fixed point value to integer given its precision. - * - * @param[in] val Fixed point value - * @param[in] p Fixed point precision - * - * @return Integer of the fixed point number - */ - static constexpr T to_int(T val, uint8_t p) - { - return val >> p; - } - /** Convert a single precision floating point value to a fixed point representation given its precision. - * - * @param[in] val Floating point value - * @param[in] p Fixed point precision - * - * @return The raw fixed point representation - */ - static constexpr T to_fixed(float val, uint8_t p) - { - return static_cast(saturate_cast(val * fixed_one(p) + ((val >= 0) ? 0.5 : -0.5))); - } - /** Clamp value between two ranges - * - * @param[in] val Value to clamp - * @param[in] min Minimum value to clamp to - * @param[in] max Maximum value to clamp to - * - * @return clamped value - */ - static constexpr T clamp(T val, T min, T max) - { - return std::min(std::max(val, min), max); - } - /** Saturate given number - * - * @param[in] val Value to saturate - * - * @return Saturated value - */ - template - static constexpr T saturate_cast(U val) - { - return static_cast(std::min(std::max(val, static_cast(std::numeric_limits::min())), static_cast(std::numeric_limits::max()))); - } -}; -/** Functions */ -struct functions -{ - /** Output stream operator - * - * @param[in] s Output stream - * @param[in] x Fixed point value - * - * @return Reference output to updated stream - */ - template - static std::basic_ostream &write(std::basic_ostream &s, fixed_point &x) - { - return s << static_cast(x); - } - /** Signbit of a fixed point number. - * - * @param[in] x Fixed point number - * - * @return True if negative else false. 
- */ - template - static bool signbit(fixed_point x) - { - return ((x.raw() >> std::numeric_limits::digits) != 0); - } - /** Checks if two fixed point numbers are equal - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed points are equal else false - */ - template - static bool isequal(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - return (x.raw() == y.raw()); - } - /** Checks if two fixed point number are not equal - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed points are not equal else false - */ - template - static bool isnotequal(fixed_point x, fixed_point y) - { - return !isequal(x, y); - } - /** Checks if one fixed point is greater than the other - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed point is greater than other - */ - template - static bool isgreater(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - return (x.raw() > y.raw()); - } - /** Checks if one fixed point is greater or equal than the other - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed point is greater or equal than other - */ - template - static bool isgreaterequal(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - return (x.raw() >= y.raw()); - } - /** Checks if one fixed point is less than the other - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed point is less than other - */ - template - static bool isless(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - return (x.raw() < y.raw()); - } - /** Checks if one fixed point is less or equal than the other - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed point is less or equal than other - */ - template - static bool islessequal(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - return (x.raw() <= y.raw()); - } - /** Checks if one fixed point is less or greater than the other - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return True if fixed point is less or greater than other - */ - template - static bool islessgreater(fixed_point x, fixed_point y) - { - return isnotequal(x, y); - } - /** Clamp fixed point to specific range. 
- * - * @param[in] x Fixed point operand - * @param[in] min Minimum value to clamp to - * @param[in] max Maximum value to clamp to - * - * @return Clamped result - */ - template - static fixed_point clamp(fixed_point x, T min, T max) - { - return fixed_point(constant_expr::clamp(x.raw(), min, max), x.precision(), true); - } - /** Negate number - * - * @param[in] x Fixed point operand - * - * @return Negated fixed point result - */ - template - static fixed_point negate(fixed_point x) - { - using promoted_T = typename traits::promote::type; - promoted_T val = -x.raw(); - if(OP == OverflowPolicy::SATURATE) - { - val = constant_expr::saturate_cast(val); - } - return fixed_point(static_cast(val), x.precision(), true); - } - /** Perform addition among two fixed point numbers - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return Result fixed point with precision equal to minimum precision of both operands - */ - template - static fixed_point add(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - if(OP == OverflowPolicy::SATURATE) - { - using type = typename traits::promote::type; - type val = static_cast(x.raw()) + static_cast(y.raw()); - val = constant_expr::saturate_cast(val); - return fixed_point(static_cast(val), p, true); - } - else - { - return fixed_point(x.raw() + y.raw(), p, true); - } - } - /** Perform subtraction among two fixed point numbers - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return Result fixed point with precision equal to minimum precision of both operands - */ - template - static fixed_point sub(fixed_point x, fixed_point y) - { - uint8_t p = std::min(x.precision(), y.precision()); - x.rescale(p); - y.rescale(p); - if(OP == OverflowPolicy::SATURATE) - { - using type = typename traits::promote::type; - type val = static_cast(x.raw()) - static_cast(y.raw()); - val = constant_expr::saturate_cast(val); - return fixed_point(static_cast(val), p, true); - } - else - { - return fixed_point(x.raw() - y.raw(), p, true); - } - } - /** Perform multiplication among two fixed point numbers - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return Result fixed point with precision equal to minimum precision of both operands - */ - template - static fixed_point mul(fixed_point x, fixed_point y) - { - using promoted_T = typename traits::promote::type; - uint8_t p_min = std::min(x.precision(), y.precision()); - uint8_t p_max = std::max(x.precision(), y.precision()); - promoted_T round_factor = (1 << (p_max - 1)); - promoted_T val = ((static_cast(x.raw()) * static_cast(y.raw())) + round_factor) >> p_max; - if(OP == OverflowPolicy::SATURATE) - { - val = constant_expr::saturate_cast(val); - } - return fixed_point(static_cast(val), p_min, true); - } - /** Perform division among two fixed point numbers - * - * @param[in] x First fixed point operand - * @param[in] y Second fixed point operand - * - * @return Result fixed point with precision equal to minimum precision of both operands - */ - template - static fixed_point div(fixed_point x, fixed_point y) - { - using promoted_T = typename traits::promote::type; - uint8_t p = std::min(x.precision(), y.precision()); - promoted_T denom = static_cast(y.raw()); - if(denom != 0) - { - promoted_T val = (static_cast(x.raw()) << std::max(x.precision(), y.precision())) / denom; - if(OP == OverflowPolicy::SATURATE) - { - val = 
constant_expr::saturate_cast(val); - } - return fixed_point(static_cast(val), p, true); - } - else - { - T val = (x.raw() < 0) ? std::numeric_limits::min() : std::numeric_limits::max(); - return fixed_point(val, p, true); - } - } - /** Shift left - * - * @param[in] x Fixed point operand - * @param[in] shift Shift value - * - * @return Shifted value - */ - template - static fixed_point shift_left(fixed_point x, size_t shift) - { - using promoted_T = typename traits::promote::type; - promoted_T val = static_cast(x.raw()) << shift; - if(OP == OverflowPolicy::SATURATE) - { - val = constant_expr::saturate_cast(val); - } - return fixed_point(static_cast(val), x.precision(), true); - } - /** Shift right - * - * @param[in] x Fixed point operand - * @param[in] shift Shift value - * - * @return Shifted value - */ - template - static fixed_point shift_right(fixed_point x, size_t shift) - { - return fixed_point(x.raw() >> shift, x.precision(), true); - } - /** Calculate absolute value - * - * @param[in] x Fixed point operand - * - * @return Absolute value of operand - */ - template - static fixed_point abs(fixed_point x) - { - using promoted_T = typename traits::promote::type; - T val = (x.raw() < 0) ? constant_expr::saturate_cast(-static_cast(x.raw())) : x.raw(); - return fixed_point(val, x.precision(), true); - } - /** Calculate the logarithm of a fixed point number - * - * @param[in] x Fixed point operand - * - * @return Logarithm value of operand - */ - template - static fixed_point log(fixed_point x) - { - uint8_t p = x.precision(); - auto const_one = fixed_point(static_cast(1), p); - - // Logarithm of 1 is zero and logarithm of negative values is not defined in R, so return 0. - // Also, log(x) == -log(1/x) for 0 < x < 1. - if(isequal(x, const_one) || islessequal(x, fixed_point(static_cast(0), p))) - { - return fixed_point(static_cast(0), p, true); - } - else if(isless(x, const_one)) - { - return mul(log(div(const_one, x)), fixed_point(-1, p)); - } - - // Remove even powers of 2 - T shift_val = 31 - __builtin_clz(x.raw() >> p); - x = shift_right(x, shift_val); - x = sub(x, const_one); - - // Constants - auto ln2 = fixed_point(0.6931471, p); - auto A = fixed_point(1.4384189, p); - auto B = fixed_point(-0.67719, p); - auto C = fixed_point(0.3218538, p); - auto D = fixed_point(-0.0832229, p); - - // Polynomial expansion - auto sum = add(mul(x, D), C); - sum = add(mul(x, sum), B); - sum = add(mul(x, sum), A); - sum = mul(x, sum); - - return mul(add(sum, fixed_point(static_cast(shift_val), p)), ln2); - } - /** Calculate the exponential of a fixed point number. 
- * - * exp(x) = exp(floor(x)) * exp(x - floor(x)) - * = pow(2, floor(x) / ln(2)) * exp(x - floor(x)) - * = exp(x - floor(x)) << (floor(x) / ln(2)) - * - * @param[in] x Fixed point operand - * - * @return Exponential value of operand - */ - template - static fixed_point exp(fixed_point x) - { - uint8_t p = x.precision(); - // Constants - auto const_one = fixed_point(1, p); - auto ln2 = fixed_point(0.6931471, p); - auto inv_ln2 = fixed_point(1.442695, p); - auto A = fixed_point(0.9978546, p); - auto B = fixed_point(0.4994721, p); - auto C = fixed_point(0.1763723, p); - auto D = fixed_point(0.0435108, p); - - T scaled_int_part = detail::constant_expr::to_int(mul(x, inv_ln2).raw(), p); - - // Polynomial expansion - auto frac_part = sub(x, mul(ln2, fixed_point(scaled_int_part, p))); - auto taylor = add(mul(frac_part, D), C); - taylor = add(mul(frac_part, taylor), B); - taylor = add(mul(frac_part, taylor), A); - taylor = mul(frac_part, taylor); - taylor = add(taylor, const_one); - - // Saturate value - if(static_cast(clz(taylor.raw())) <= scaled_int_part) - { - return fixed_point(std::numeric_limits::max(), p, true); - } - - return (scaled_int_part < 0) ? shift_right(taylor, -scaled_int_part) : shift_left(taylor, scaled_int_part); - } - /** Calculate the inverse square root of a fixed point number - * - * @param[in] x Fixed point operand - * - * @return Inverse square root value of operand - */ - template - static fixed_point inv_sqrt(fixed_point x) - { - const uint8_t p = x.precision(); - int8_t shift = std::numeric_limits::digits - (p + detail::clz(x.raw())); - - shift += std::numeric_limits::is_signed ? 1 : 0; - - // Use volatile to restrict compiler optimizations on shift as compiler reports maybe-uninitialized error on Android - volatile int8_t *shift_ptr = &shift; - - auto const_three = fixed_point(3, p); - auto a = (*shift_ptr < 0) ? shift_left(x, -(shift)) : shift_right(x, shift); - fixed_point x2 = a; - - // We need three iterations to find the result for QS8 and five for QS16 - constexpr int num_iterations = std::is_same::value ? 3 : 5; - for(int i = 0; i < num_iterations; ++i) - { - fixed_point three_minus_dx = sub(const_three, mul(a, mul(x2, x2))); - x2 = shift_right(mul(x2, three_minus_dx), 1); - } - - return (shift < 0) ? shift_left(x2, (-shift) >> 1) : shift_right(x2, shift >> 1); - } - /** Calculate the hyperbolic tangent of a fixed point number - * - * @param[in] x Fixed point operand - * - * @return Hyperbolic tangent of the operand - */ - template - static fixed_point tanh(fixed_point x) - { - uint8_t p = x.precision(); - // Constants - auto const_one = fixed_point(1, p); - auto const_two = fixed_point(2, p); - - auto exp2x = exp(const_two * x); - auto num = exp2x - const_one; - auto den = exp2x + const_one; - auto tanh = num / den; - - return tanh; - } - /** Calculate the a-th power of a fixed point number. 
- * - * The power is computed as x^a = e^(log(x) * a) - * - * @param[in] x Fixed point operand - * @param[in] a Fixed point exponent - * - * @return a-th power of the operand - */ - template - static fixed_point pow(fixed_point x, fixed_point a) - { - return exp(log(x) * a); - } -}; - -template -bool operator==(const fixed_point &lhs, const fixed_point &rhs) -{ - return functions::isequal(lhs, rhs); -} -template -bool operator!=(const fixed_point &lhs, const fixed_point &rhs) -{ - return !operator==(lhs, rhs); -} -template -bool operator<(const fixed_point &lhs, const fixed_point &rhs) -{ - return functions::isless(lhs, rhs); -} -template -bool operator>(const fixed_point &lhs, const fixed_point &rhs) -{ - return operator<(rhs, lhs); -} -template -bool operator<=(const fixed_point &lhs, const fixed_point &rhs) -{ - return !operator>(lhs, rhs); -} -template -bool operator>=(const fixed_point &lhs, const fixed_point &rhs) -{ - return !operator<(lhs, rhs); -} -template -fixed_point operator+(const fixed_point &lhs, const fixed_point &rhs) -{ - return functions::add(lhs, rhs); -} -template -fixed_point operator-(const fixed_point &lhs, const fixed_point &rhs) -{ - return functions::sub(lhs, rhs); -} -template -fixed_point operator-(const fixed_point &rhs) -{ - return functions::negate(rhs); -} -template -fixed_point operator*(fixed_point x, fixed_point y) -{ - return functions::mul(x, y); -} -template -fixed_point operator/(fixed_point x, fixed_point y) -{ - return functions::div(x, y); -} -template -fixed_point operator>>(fixed_point x, size_t shift) -{ - return functions::shift_right(x, shift); -} -template -fixed_point operator<<(fixed_point x, size_t shift) -{ - return functions::shift_left(x, shift); -} -template -std::basic_ostream &operator<<(std::basic_ostream &s, fixed_point x) -{ - return functions::write(s, x); -} -template -inline fixed_point min(fixed_point x, fixed_point y) -{ - return x > y ? y : x; -} -template -inline fixed_point max(fixed_point x, fixed_point y) -{ - return x > y ? 
x : y; -} -template -inline fixed_point add(fixed_point x, fixed_point y) -{ - return functions::add(x, y); -} -template -inline fixed_point sub(fixed_point x, fixed_point y) -{ - return functions::sub(x, y); -} -template -inline fixed_point mul(fixed_point x, fixed_point y) -{ - return functions::mul(x, y); -} -template -inline fixed_point div(fixed_point x, fixed_point y) -{ - return functions::div(x, y); -} -template -inline fixed_point abs(fixed_point x) -{ - return functions::abs(x); -} -template -inline fixed_point clamp(fixed_point x, T min, T max) -{ - return functions::clamp(x, min, max); -} -template -inline fixed_point exp(fixed_point x) -{ - return functions::exp(x); -} -template -inline fixed_point log(fixed_point x) -{ - return functions::log(x); -} -template -inline fixed_point inv_sqrt(fixed_point x) -{ - return functions::inv_sqrt(x); -} -template -inline fixed_point tanh(fixed_point x) -{ - return functions::tanh(x); -} -template -inline fixed_point pow(fixed_point x, fixed_point a) -{ - return functions::pow(x, a); -} -} // namespace detail - -// Expose operators -using detail::operator==; -using detail::operator!=; -using detail::operator<; -using detail::operator>; -using detail::operator<=; -using detail::operator>=; -using detail::operator+; -using detail::operator-; -using detail::operator*; -using detail::operator/; -using detail::operator>>; -using detail::operator<<; - -// Expose additional functions -using detail::min; -using detail::max; -using detail::add; -using detail::sub; -using detail::mul; -using detail::div; -using detail::abs; -using detail::clamp; -using detail::exp; -using detail::log; -using detail::inv_sqrt; -using detail::tanh; -using detail::pow; -// TODO: floor -// TODO: ceil -// TODO: sqrt -} // namespace fixed_point_arithmetic -} // namespace test -} // namespace arm_compute -#endif /*__ARM_COMPUTE_TEST_VALIDATION_FIXEDPOINT_H__ */ diff --git a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp index a8c7253b8f..7676b858f6 100644 --- a/tests/validation/GLES_COMPUTE/ActivationLayer.cpp +++ b/tests/validation/GLES_COMPUTE/ActivationLayer.cpp @@ -61,35 +61,14 @@ AbsoluteTolerance tolerance(ActivationLayerInfo::ActivationFunction activ case ActivationLayerInfo::ActivationFunction::SQUARE: return AbsoluteTolerance(data_type == DataType::F16 ? 0.1f : epsilon); case ActivationLayerInfo::ActivationFunction::LOGISTIC: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : epsilon); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : epsilon); case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: return AbsoluteTolerance(data_type == DataType::F16 ? 0.00001f : epsilon); case ActivationLayerInfo::ActivationFunction::SOFT_RELU: case ActivationLayerInfo::ActivationFunction::SQRT: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.01f : 0.00001f); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 0.01f : 0.00001f); case ActivationLayerInfo::ActivationFunction::TANH: - if(is_data_type_fixed_point(data_type)) - { - return AbsoluteTolerance(5.f); - } - else - { - return AbsoluteTolerance(data_type == DataType::F16 ? 0.001f : 0.00001f); - } + return AbsoluteTolerance(data_type == DataType::F16 ? 
0.001f : 0.00001f); default: return AbsoluteTolerance(epsilon); } diff --git a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp index ac1bd724ac..7679007a82 100644 --- a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp +++ b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp @@ -59,17 +59,17 @@ TEST_SUITE(PoolingLayer) DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo", { - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32), // Non-rectangular Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), }), framework::dataset::make("OutputInfo", { - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32), TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), })), framework::dataset::make("PoolInfo", { diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h index 2b4d277e92..814d1f5ed0 100644 --- a/tests/validation/Helpers.h +++ b/tests/validation/Helpers.h @@ -176,13 +176,11 @@ void fill_lookuptable(T &&table) } /** Helper function to get the testing range for batch normalization layer. - * - * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 0. * * @return A pair containing the lower upper testing bounds. 
 */
 template <typename T>
-std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 0)
+std::pair<T, T> get_batchnormalization_layer_test_bounds()
 {
     const bool is_float = std::is_floating_point<T>::value;
     std::pair<T, T> bounds;
@@ -194,7 +192,7 @@ std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_positio
     }
     else
     {
-        bounds = std::make_pair(1, 1 << (fixed_point_position));
+        bounds = std::make_pair(1, 1);
     }
     return bounds;
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 591d1424c8..1d82ff0712 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -75,20 +75,20 @@ TEST_SUITE(NEON)
 TEST_SUITE(ConvolutionLayer)
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
-                                                        TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
+                                                        TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                 }),
-                framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
-                                                          TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+                framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
+                                                          TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                 })),
-                framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
-                                                         TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+                framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
+                                                         TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                 })),
                 framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                        PadStrideInfo(1, 1, 0, 0),
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index 87d413f202..277953badb 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -100,33 +100,33 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),  // Mismatching data type
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0),  // Invalid weights shape
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 4),  // Non supported data type
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 11), // Invalid bias shape
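// [Annotation, not part of the original patch] The mechanical rewrite running through
// every hunk of this commit is visible in the rows above: TensorInfo drops its trailing
// fixed_point_position argument (the 0, 4 and 11 values). A minimal before/after sketch
// of the call site, assuming the pre-change constructor took (shape, num_channels,
// data_type, fixed_point_position):
//
//   TensorInfo qs_era(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0); // before
//   TensorInfo now(TensorShape(27U, 13U, 2U), 1, DataType::F32);       // after
//
// The dataset continues below with the remaining invalid configurations.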
- TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights shape + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), // Non supported data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid bias shape + TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16, 5), - TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32, 11), - TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32), })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16, 0), - TensorInfo(TensorShape(1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U), 1, DataType::F32, 5), - TensorInfo(TensorShape(25U, 11U), 1, DataType::F32, 11), - TensorInfo(TensorShape(1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(1U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 5), - TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32), })), framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp index 7cfffc0c2b..25b357ebed 100644 --- a/tests/validation/NEON/DilatedConvolutionLayer.cpp +++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp @@ -64,20 +64,20 @@ TEST_SUITE(NEON) TEST_SUITE(DilatedConvolutionLayer) DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( - 
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), + TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32) }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0) + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32) })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp index bf5b33c9a2..acd0e5d64b 100644 --- a/tests/validation/NEON/DirectConvolutionLayer.cpp +++ b/tests/validation/NEON/DirectConvolutionLayer.cpp @@ -80,45 +80,45 @@ TEST_SUITE(DirectConvolutionLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non-rectangular weights dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid stride - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching input feature maps + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported kernel width + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non-rectangular weights 
dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid stride + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid biases dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid output size }), - framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32), })), - framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), + TensorInfo(TensorShape(3U), 1, DataType::F32), + TensorInfo(TensorShape(4U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(4U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), - TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + 
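// [Annotation, not part of the original patch] Each row of these zipped datasets is one
// test case, and the trailing comments name the single property the validate() overload
// is expected to reject. A hedged sketch of how a row is typically consumed inside the
// DATA_TEST_CASE body (the exact argument order of validate() is an assumption here):
//
//   bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false),
//                                                           &weights_info.clone()->set_is_resizable(false),
//                                                           &biases_info.clone()->set_is_resizable(false),
//                                                           &output_info.clone()->set_is_resizable(false),
//                                                           conv_info));
//   ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
//
// The OutputInfo column continues below.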
TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32), + TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32), })), framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index eb350e1029..9eba3c85c1 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -102,7 +102,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4 - TensorInfo(TensorShape(21U, 13U), 1, DataType::S32, 2), // Mismatching data type + TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Mismatching data type TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), diff --git a/tests/validation/NEON/LocallyConnected.cpp b/tests/validation/NEON/LocallyConnected.cpp index 0c36ff6c85..bd0999df50 100644 --- a/tests/validation/NEON/LocallyConnected.cpp +++ b/tests/validation/NEON/LocallyConnected.cpp @@ -51,41 +51,41 @@ TEST_SUITE(LocallyConnected) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/weights - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/bias - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/weights - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/bias - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching shape input/output - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Asymmetric padding - TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0) + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/weights + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/bias + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching data type input/output + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/weights + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/bias + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Mismatching shape input/output + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32), // Asymmetric padding + TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32) }), - framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 
275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32, 0) + framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F16), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 274U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(3U, 3U, 5U, 21U, 275U), 1, DataType::F32) })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F16, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 274U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0), - TensorInfo(TensorShape(21U, 275U), 1, DataType::F32, 0) + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F16), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 274U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32), + TensorInfo(TensorShape(21U, 275U), 1, DataType::F32) })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0) + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F16), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 22U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32) })), framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp index 02cca0b452..a4321000f5 100644 --- a/tests/validation/NEON/NormalizationLayer.cpp +++ b/tests/validation/NEON/NormalizationLayer.cpp @@ -66,19 +66,19 @@ TEST_SUITE(NormalizationLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - 
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/output - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Even normalization + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non implemented IN_MAP_2D + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), })), framework::dataset::make("NormInfo", { NormalizationLayerInfo(NormType::IN_MAP_1D, 5), NormalizationLayerInfo(NormType::IN_MAP_1D, 5), diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index bbfca46ca9..336c066fa9 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -71,21 +71,21 @@ TEST_SUITE(PoolingLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination - TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling - TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination + TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32), // Non-rectangular Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, 
DataType::F32), // Invalid output Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), - TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), })), framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), diff --git a/tests/validation/NEON/RNNLayer.cpp b/tests/validation/NEON/RNNLayer.cpp index 7aa3befd03..a5f84990f2 100644 --- a/tests/validation/NEON/RNNLayer.cpp +++ b/tests/validation/NEON/RNNLayer.cpp @@ -49,59 +49,59 @@ TEST_SUITE(RNNLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8, 0), // Wrong data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Wrong input size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong weights size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong recurrent weights size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong bias size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong output size - TensorInfo(TensorShape(27U, 13U), 1, DataType::F32, 0), // Wrong hidden output size - TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::U8), // Wrong data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Wrong input size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong weights size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong recurrent weights size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong bias size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong output size + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Wrong hidden output size + TensorInfo(TensorShape(32U, 32U), 1, DataType::F32), }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(27U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0), + framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + 
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 32U), 1, DataType::F32), })), - framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0), + framework::dataset::make("RecurrentWeightsInfo", { TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 32U), 1, DataType::F32), })), - framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(30U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U), 1, DataType::F32, 0), + framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(30U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(32U), 1, DataType::F32), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 32U), 1, DataType::F32), })), - framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 
13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U), 1, DataType::F32, 0), - TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(32U, 32U), 1, DataType::F32, 0), + framework::dataset::make("HiddenStateInfo", { TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(11U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 32U), 1, DataType::F32), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp index 5f5cfdd808..0d4a86e372 100644 --- a/tests/validation/NEON/Scale.cpp +++ b/tests/validation/NEON/Scale.cpp @@ -77,17 +77,17 @@ TEST_SUITE(Scale) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8, 0), // Mismatching data type - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported sampling point - TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32, 0), // Invalid policy - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Insufficient padding - TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32, 0), + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Unsupported sampling point + TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32), // Invalid policy + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Insufficient padding + TensorInfo(TensorShape(4U, 27U, 13U), 1, DataType::F32), }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32, 0), - TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32, 0), - TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32, 0), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32), + TensorInfo(TensorShape(132U, 25U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(4U, 132U, 25U), 1, DataType::F32), })), framework::dataset::make("InterpolationPolicy", { InterpolationPolicy::NEAREST_NEIGHBOR, InterpolationPolicy::NEAREST_NEIGHBOR, diff --git a/tests/validation/Validation.h b/tests/validation/Validation.h index 0c96052368..9ce597b621 100644 --- a/tests/validation/Validation.h +++ b/tests/validation/Validation.h @@ -24,7 +24,6 @@ #ifndef __ARM_COMPUTE_TEST_VALIDATION_H__ #define __ARM_COMPUTE_TEST_VALIDATION_H__ -#include "arm_compute/core/FixedPoint.h" #include "arm_compute/core/IArray.h" #include "arm_compute/core/Types.h" #include "support/ToolchainSupport.h" diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index 24539545ca..499628c438 100644 --- 
a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -65,16 +65,10 @@ protected: std::uniform_real_distribution<> distribution(-1.f, 1.f); library->fill(tensor, distribution, 0); } - else if(is_data_type_quantized_asymmetric(tensor.data_type())) + else // data type is quantized_asymmetric { library->fill_tensor_uniform(tensor, 0); } - else - { - const int one_fixed = 1; - std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); - library->fill(tensor, distribution, 0); - } } TensorType compute_target(TensorShape shape, PoolingLayerInfo info, diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h index 59ce5192ff..99c0710f7f 100644 --- a/tests/validation/fixtures/SoftmaxLayerFixture.h +++ b/tests/validation/fixtures/SoftmaxLayerFixture.h @@ -64,17 +64,11 @@ protected: std::uniform_real_distribution<> distribution(-1000.f, 1000.f); library->fill(tensor, distribution, 0); } - else if(is_data_type_quantized_asymmetric(tensor.data_type())) + else // data type is quantized_asymmetric { std::uniform_int_distribution<> distribution(0, 100); library->fill(tensor, distribution, 0); } - else - { - const int one_fixed = 1; - std::uniform_int_distribution<> distribution(-one_fixed, one_fixed); - library->fill(tensor, distribution, 0); - } } TensorType compute_target(const TensorShape &shape, DataType data_type, @@ -138,20 +132,6 @@ public: } }; -template <typename TensorType, typename AccessorType, typename FunctionType, typename T> -class SoftmaxValidationFixedPointFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T> -{ -public: - template <typename...> - void setup(TensorShape shape, DataType data_type) - { - SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, - data_type, - QuantizationInfo(), - 1.0f); - } -}; - template <typename TensorType, typename AccessorType, typename FunctionType, typename T> class SoftmaxValidationQuantizedFixture : public SoftmaxValidationGenericFixture<TensorType, AccessorType, FunctionType, T> { diff --git a/tests/validation/reference/ArithmeticSubtraction.cpp b/tests/validation/reference/ArithmeticSubtraction.cpp index bed2d37090..f39d01f9e8 100644 --- a/tests/validation/reference/ArithmeticSubtraction.cpp +++ b/tests/validation/reference/ArithmeticSubtraction.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited.
* * SPDX-License-Identifier: MIT * * @@ -23,7 +23,6 @@ */ #include "ArithmeticSubtraction.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp index 3d1a6ed7d7..4ea3769c2c 100644 --- a/tests/validation/reference/BatchNormalizationLayer.cpp +++ b/tests/validation/reference/BatchNormalizationLayer.cpp @@ -25,7 +25,6 @@ #include "ActivationLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/ChannelCombine.cpp b/tests/validation/reference/ChannelCombine.cpp index c1ec3ec578..b76dcaca8c 100644 --- a/tests/validation/reference/ChannelCombine.cpp +++ b/tests/validation/reference/ChannelCombine.cpp @@ -24,7 +24,6 @@ #include "ChannelCombine.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/ChannelExtract.cpp b/tests/validation/reference/ChannelExtract.cpp index 595bb13098..6f17fc06fe 100644 --- a/tests/validation/reference/ChannelExtract.cpp +++ b/tests/validation/reference/ChannelExtract.cpp @@ -24,7 +24,6 @@ #include "ChannelExtract.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/ColorConvert.cpp b/tests/validation/reference/ColorConvert.cpp index a8a530498e..6aa2ffa14c 100644 --- a/tests/validation/reference/ColorConvert.cpp +++ b/tests/validation/reference/ColorConvert.cpp @@ -24,7 +24,6 @@ #include "ColorConvert.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/validation/reference/ColorConvertHelper.h" diff --git a/tests/validation/reference/Convolution3d.h b/tests/validation/reference/Convolution3d.h index 700175880b..2e5fefd99a 100644 --- a/tests/validation/reference/Convolution3d.h +++ b/tests/validation/reference/Convolution3d.h @@ -25,7 +25,6 @@ #define __ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H__ #include "arm_compute/core/utils/quantization/AsymmHelpers.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/validation/reference/UtilsQuantizedAsymm.h" @@ -91,74 +90,16 @@ inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weig *out_ptr = acc + (*b_ptr); } -// 3D convolution for fixed point type -template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 > +// 3D convolution for QASYMM8 type +template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 > inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out, int i_offset, int w_offset, int b_offset, int o_offset, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x = 1, int dilation_y = 1) { - const T *in_ptr = in.data() + i_offset; - const T *w_ptr = weights.data() + w_offset; - const T *b_ptr = bias.data() + b_offset; - T *out_ptr = out.data() + o_offset; - int fixed_point_position = in.fixed_point_position(); - - const int half_width_weights_start = width_weights / 2; - const int half_width_weights_end = 
((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start; - const int half_height_weights_start = height_weights / 2; - const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start; - - using namespace fixed_point_arithmetic; - using promoted_type = fixed_point_arithmetic::traits::promote_t<T>; - - // Reset accumulator - fixed_point<promoted_type> acc(0, fixed_point_position); - - // Compute a 2D convolution for each IFM and accumulate the result - for(int ifm = 0; ifm < depth_in; ++ifm) - { - // Compute the offset for the input slice - const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in; - - // Compute 2D convolution - for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk) - { - for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk) - { - // Check if the pixel is out-of-bound - if(is_valid_pixel(xi + xk * dilation_x, 0, width_in) && is_valid_pixel(yi + yk * dilation_y, 0, height_in)) - { - const int idx = xk + half_width_weights_start; - const int idy = yk + half_height_weights_start; - - const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk * dilation_x + yk * dilation_y * width_in], fixed_point_position, true); - const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true); - const fixed_point<promoted_type> iw = i_value * w_value; - acc = iw + acc; - } - } - } - } - - // Get the bias - const fixed_point<promoted_type> b(*b_ptr, fixed_point_position, true); - - // Accumulate the bias and convert back - acc = acc + b; - fixed_point<T> res(acc); - *out_ptr = res.raw(); -} - -// 3D convolution for QASYMM8 type -template <> -inline void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out, - int i_offset, int w_offset, int b_offset, int o_offset, - int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x, int dilation_y) -{ - const uint8_t *in_ptr = in.data() + i_offset; - const uint8_t *w_ptr = weights.data() + w_offset; - const int32_t *b_ptr = bias.data() + b_offset; - uint8_t *out_ptr = out.data() + o_offset; + const T *in_ptr = in.data() + i_offset; + const T *w_ptr = weights.data() + w_offset; + const TB *b_ptr = bias.data() + b_offset; + T *out_ptr = out.data() + o_offset; const int input_offset = -in.quantization_info().offset; const float input_scale = in.quantization_info().scale; diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp index 00c839d2df..e212e2742f 100644 --- a/tests/validation/reference/ConvolutionLayer.cpp +++ b/tests/validation/reference/ConvolutionLayer.cpp @@ -23,7 +23,6 @@ */ #include "ConvolutionLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/validation/reference/Convolution3d.h" #include "tests/validation/reference/Permute.h" diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp index d073bbf7a1..e73023e419 100644 --- a/tests/validation/reference/DeconvolutionLayer.cpp +++ b/tests/validation/reference/DeconvolutionLayer.cpp @@ -23,7 +23,6 @@ */ #include "ConvolutionLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/DepthConcatenateLayer.cpp b/tests/validation/reference/DepthConcatenateLayer.cpp index 
c9a23520c7..dbcd575e9a 100644 --- a/tests/validation/reference/DepthConcatenateLayer.cpp +++ b/tests/validation/reference/DepthConcatenateLayer.cpp @@ -23,7 +23,6 @@ */ #include "DepthConcatenateLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp index 022007720a..6f90963360 100644 --- a/tests/validation/reference/DepthConvertLayer.cpp +++ b/tests/validation/reference/DepthConvertLayer.cpp @@ -23,7 +23,6 @@ */ #include "DepthConvertLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/Types.h" @@ -61,33 +60,6 @@ SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, Con return result; } -template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&std::is_same<T1, T2>::value, int >::type > -SimpleTensor<T2> depth_convert(const SimpleTensor<T1> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_UNUSED(policy); - - using namespace fixed_point_arithmetic; - - SimpleTensor<T2> result(src.shape(), dt_out); - - bool is_in_place = (&src == &result); - - const int fixed_point_position_in = src.fixed_point_position(); - const int fixed_point_position_out = (is_in_place) ? static_cast<int>(shift) : result.fixed_point_position(); - - if(!is_in_place || (fixed_point_position_in != fixed_point_position_out)) - { - for(int i = 0; i < src.num_elements(); ++i) - { - auto x = fixed_point<T2>(src[i], fixed_point_position_in, true); - x.rescale(fixed_point_position_out); - result[i] = x.raw(); - } - } - - return result; -} - template SimpleTensor depth_convert(const SimpleTensor &src, DataType dt_out, ConvertPolicy policy, uint32_t shift); template SimpleTensor depth_convert(const SimpleTensor &src, DataType dt_out, ConvertPolicy policy, uint32_t shift); template SimpleTensor depth_convert(const SimpleTensor &src, DataType dt_out, ConvertPolicy policy, uint32_t shift); diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp index d8f3cbae49..39429e2449 100644 --- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp +++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp @@ -26,7 +26,6 @@ #include "ConvolutionLayer.h" #include "Utils.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" #include "tests/validation/reference/Utils.h" #include "tests/validation/reference/UtilsQuantizedAsymm.h" diff --git a/tests/validation/reference/FixedPoint.cpp b/tests/validation/reference/FixedPoint.cpp deleted file mode 100644 index a016093ed6..0000000000 --- a/tests/validation/reference/FixedPoint.cpp +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "FixedPoint.h" - -#include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" -#include "tests/validation/Helpers.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace reference -{ -template <typename T> -SimpleTensor<T> fixed_point_operation(const SimpleTensor<T> &src, FixedPointOp op) -{ - SimpleTensor<T> result(src.shape(), src.data_type()); - - const int p = src.fixed_point_position(); - switch(op) - { - case FixedPointOp::EXP: - for(int i = 0; i < src.num_elements(); ++i) - { - result[i] = fixed_point_arithmetic::exp(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw(); - } - break; - case FixedPointOp::LOG: - for(int i = 0; i < src.num_elements(); ++i) - { - result[i] = fixed_point_arithmetic::log(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw(); - } - break; - case FixedPointOp::INV_SQRT: - for(int i = 0; i < src.num_elements(); ++i) - { - result[i] = fixed_point_arithmetic::inv_sqrt(fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw(); - } - break; - case FixedPointOp::RECIPROCAL: - for(int i = 0; i < src.num_elements(); ++i) - { - result[i] = fixed_point_arithmetic::div(fixed_point_arithmetic::fixed_point<T>(1, p), fixed_point_arithmetic::fixed_point<T>(src[i], p, true)).raw(); - } - break; - default: - ARM_COMPUTE_ERROR("Fixed point operation not supported"); - break; - } - - return result; -} - -template SimpleTensor<int8_t> fixed_point_operation(const SimpleTensor<int8_t> &src, FixedPointOp op); -template SimpleTensor<int16_t> fixed_point_operation(const SimpleTensor<int16_t> &src, FixedPointOp op); -} // namespace reference -} // namespace validation -} // namespace test -} // namespace arm_compute diff --git a/tests/validation/reference/FixedPoint.h b/tests/validation/reference/FixedPoint.h deleted file mode 100644 index f0117f9dd0..0000000000 --- a/tests/validation/reference/FixedPoint.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__ -#define __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__ - -#include "tests/SimpleTensor.h" -#include "tests/Types.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -namespace reference -{ -template <typename T> -SimpleTensor<T> fixed_point_operation(const SimpleTensor<T> &src, FixedPointOp op); -} // namespace reference -} // namespace validation -} // namespace test -} // namespace arm_compute -#endif /* __ARM_COMPUTE_TEST_FIXED_POINT_OPERATION_H__ */ diff --git a/tests/validation/reference/FlattenLayer.cpp b/tests/validation/reference/FlattenLayer.cpp index e140d752a0..381ce37051 100644 --- a/tests/validation/reference/FlattenLayer.cpp +++ b/tests/validation/reference/FlattenLayer.cpp @@ -23,8 +23,6 @@ */ #include "FlattenLayer.h" -#include "tests/validation/FixedPoint.h" - namespace arm_compute { namespace test diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp index 3ef10eacea..d65d0caab0 100644 --- a/tests/validation/reference/FullyConnectedLayer.cpp +++ b/tests/validation/reference/FullyConnectedLayer.cpp @@ -24,7 +24,6 @@ #include "FullyConnectedLayer.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/reference/UtilsQuantizedAsymm.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp index 7378ada4ab..2feab89950 100644 --- a/tests/validation/reference/GEMM.cpp +++ b/tests/validation/reference/GEMM.cpp @@ -24,7 +24,6 @@ #include "GEMM.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { @@ -85,75 +84,6 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S return dst; } -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type> -SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta) -{ - using namespace fixed_point_arithmetic; - - // Create reference - SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 }; - - // Compute reference - using promoted_type = fixed_point_arithmetic::traits::promote_t<T>; - - const int M = dst.shape().y(); - const int N = dst.shape().x(); - const int K = a.shape().x(); - const int D = a.shape().z(); // Number of matrices in a batch - const int W = a.shape()[3]; // Number of batched-gemm (Winograd case) - - const int a_stride_z = K * M; - const int a_stride_w = K * M * D; - - const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions - const int b_stride_w = b.shape().num_dimensions() > 3 ? 
K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions - - const int c_stride_z = N * M; - const int c_stride_w = N * M * D; - - const int fixed_point_position = a.fixed_point_position(); - const fixed_point<T> alpha_q(alpha, fixed_point_position); - const fixed_point<T> beta_q(beta, fixed_point_position); - - for(int w = 0; w < W; ++w) - { - for(int depth = 0; depth < D; ++depth) - { - const int base_addr_a = depth * a_stride_z + w * a_stride_w; - const int base_addr_b = depth * b_stride_z + w * b_stride_w; - const int base_addr_c = depth * c_stride_z + w * c_stride_w; - - for(int row = 0; row < M; ++row) - { - for(int col = 0; col < N; ++col) - { - fixed_point<promoted_type> acc_q(0, fixed_point_position); - - for(int k = 0; k < K; ++k) - { - const fixed_point<promoted_type> a0_q(a[base_addr_a + row * K + k], fixed_point_position, true); - const fixed_point<promoted_type> b0_q(b[base_addr_b + k * N + col], fixed_point_position, true); - - acc_q = acc_q + (a0_q * b0_q); - } - - // Finalize the result: alpha * A * B + beta * C - const fixed_point<T> c0_q(c[base_addr_c + col + row * N], fixed_point_position, true); - - fixed_point<T> res_q(acc_q); - res_q = alpha_q * res_q; - res_q = res_q + (beta_q * c0_q); - - // Store the result - dst[base_addr_c + col + row * N] = res_q.raw(); - } - } - } - } - - return dst; -} - template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta); template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta); } // namespace reference diff --git a/tests/validation/reference/GEMM.h b/tests/validation/reference/GEMM.h index cda792bf8b..39007c60bc 100644 --- a/tests/validation/reference/GEMM.h +++ b/tests/validation/reference/GEMM.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -38,8 +38,6 @@ namespace reference template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0> SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta); -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> -SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/GEMMInterleave4x4.h b/tests/validation/reference/GEMMInterleave4x4.h index e6b09afb9a..e3d72d91aa 100644 --- a/tests/validation/reference/GEMMInterleave4x4.h +++ b/tests/validation/reference/GEMMInterleave4x4.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,7 +24,6 @@ #include "GEMM.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { diff --git a/tests/validation/reference/GEMMInterleaveBlocked.h b/tests/validation/reference/GEMMInterleaveBlocked.h index ff5a0d647c..d649a512e3 100644 --- a/tests/validation/reference/GEMMInterleaveBlocked.h +++ b/tests/validation/reference/GEMMInterleaveBlocked.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited.
* * SPDX-License-Identifier: MIT * @@ -24,7 +24,6 @@ #include "GEMM.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { diff --git a/tests/validation/reference/GEMMTranspose1xW.h b/tests/validation/reference/GEMMTranspose1xW.h index d6a2e89176..6ec70b1067 100644 --- a/tests/validation/reference/GEMMTranspose1xW.h +++ b/tests/validation/reference/GEMMTranspose1xW.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,7 +24,6 @@ #include "GEMM.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { diff --git a/tests/validation/reference/NormalizationLayer.cpp b/tests/validation/reference/NormalizationLayer.cpp index 85872c8f90..2ae68c63cf 100644 --- a/tests/validation/reference/NormalizationLayer.cpp +++ b/tests/validation/reference/NormalizationLayer.cpp @@ -24,7 +24,6 @@ #include "NormalizationLayer.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { @@ -146,125 +145,6 @@ SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLay return dst; } -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type> -SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info) -{ - using namespace fixed_point_arithmetic; - - // Create reference - SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 }; - - // Compute reference - const int fixed_point_position = src.fixed_point_position(); - - const uint32_t norm_size = info.norm_size(); - NormType type = info.type(); - fixed_point<T> beta(info.beta(), fixed_point_position); - fixed_point<T> kappa(info.kappa(), fixed_point_position); - - const int cols = src.shape()[0]; - const int rows = src.shape()[1]; - const int depth = src.shape()[2]; - int upper_dims = src.shape().total_size() / (cols * rows); - - fixed_point<T> coeff(info.scale_coeff(), fixed_point_position); - int radius_cols = norm_size / 2; - - // IN_MAP_1D and CROSS_MAP normalize over a single axis only - int radius_rows = (NormType::IN_MAP_2D == type) ? 
norm_size / 2 : 0; - - if(type == NormType::CROSS_MAP) - { - // Remove also depth from upper dimensions since it is the dimension we - // want to use for normalization - upper_dims /= depth; - - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - for(int l = 0; l < depth; ++l) - { - fixed_point<T> accumulated_scale(0.f, fixed_point_position); - - for(int j = -radius_cols; j <= radius_cols; ++j) - { - const int z = l + j; - - if(z >= 0 && z < depth) - { - const T value = src[k + i * cols + z * rows * cols + r * cols * rows * depth]; - const fixed_point<T> fp_value(value, fixed_point_position, true); - accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); - } - } - - accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); - dst[k + i * cols + l * rows * cols + r * cols * rows * depth] = accumulated_scale.raw(); - } - } - } - } - } - else - { - for(int r = 0; r < upper_dims; ++r) - { - for(int i = 0; i < rows; ++i) - { - for(int k = 0; k < cols; ++k) - { - fixed_point<T> accumulated_scale(0.f, fixed_point_position); - - for(int j = -radius_rows; j <= radius_rows; ++j) - { - const int y = i + j; - - for(int l = -radius_cols; l <= radius_cols; ++l) - { - const int x = k + l; - - if((x >= 0 && y >= 0) && (x < cols && y < rows)) - { - const T value = src[x + y * cols + r * cols * rows]; - const fixed_point<T> fp_value(value, fixed_point_position, true); - accumulated_scale = add(accumulated_scale, mul(fp_value, fp_value)); - } - } - } - - accumulated_scale = add(kappa, mul(accumulated_scale, coeff)); - dst[k + i * cols + r * cols * rows] = accumulated_scale.raw(); - } - } - } - } - - if(info.beta() == 1.f) - { - for(int i = 0; i < dst.num_elements(); ++i) - { - fixed_point<T> res = div(fixed_point<T>(src[i], fixed_point_position, true), fixed_point<T>(dst[i], fixed_point_position, true)); - dst[i] = res.raw(); - } - } - else - { - const fixed_point<T> beta(info.beta(), fixed_point_position); - - for(int i = 0; i < dst.num_elements(); ++i) - { - fixed_point<T> res = pow(fixed_point<T>(dst[i], fixed_point_position, true), beta); - res = div(fixed_point<T>(src[i], fixed_point_position, true), res); - dst[i] = res.raw(); - } - } - - return dst; -} - template SimpleTensor<float> normalization_layer(const SimpleTensor<float> &src, NormalizationLayerInfo info); template SimpleTensor<half> normalization_layer(const SimpleTensor<half> &src, NormalizationLayerInfo info); } // namespace reference diff --git a/tests/validation/reference/NormalizationLayer.h b/tests/validation/reference/NormalizationLayer.h index 3f624ff30a..3448baf385 100644 --- a/tests/validation/reference/NormalizationLayer.h +++ b/tests/validation/reference/NormalizationLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited.
* * SPDX-License-Identifier: MIT * * @@ -38,8 +38,6 @@ namespace reference template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0> SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info); -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> -SimpleTensor<T> normalization_layer(const SimpleTensor<T> &src, NormalizationLayerInfo info); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp index 7304fb0673..859da5ce59 100644 --- a/tests/validation/reference/PixelWiseMultiplication.cpp +++ b/tests/validation/reference/PixelWiseMultiplication.cpp @@ -23,8 +23,6 @@ */ #include "PixelWiseMultiplication.h" -#include "tests/validation/FixedPoint.h" - namespace arm_compute { namespace test diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp index e9054b9043..02c430a64f 100644 --- a/tests/validation/reference/PoolingLayer.cpp +++ b/tests/validation/reference/PoolingLayer.cpp @@ -25,7 +25,6 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute @@ -44,7 +43,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y())); // Create reference - SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type(), src.fixed_point_position()), info), src.data_type(), 1 }; + SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info), src.data_type(), 1 }; const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width; const int pool_size_y = info.is_global_pooling() ? 
src.shape().y() : info.pool_size().height; diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp index ae4bcd8f0e..aa640ad5e6 100644 --- a/tests/validation/reference/SoftmaxLayer.cpp +++ b/tests/validation/reference/SoftmaxLayer.cpp @@ -24,7 +24,6 @@ #include "SoftmaxLayer.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" namespace arm_compute { @@ -71,63 +70,21 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta) return dst; } -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type> +template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type> SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta) -{ - ARM_COMPUTE_UNUSED(beta); - - using namespace fixed_point_arithmetic; - - // Create reference - SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 }; - - // Compute reference - const int cols = src.shape()[0]; - const int upper_dims = src.num_elements() / cols; - - for(int r = 0; r < upper_dims; ++r) - { - const T *src_row_ptr = src.data() + r * cols; - T *dst_row_ptr = dst.data() + r * cols; - - // Find max - const fixed_point<T> max(*std::max_element(src_row_ptr, src_row_ptr + cols), src.fixed_point_position(), true); - - // Regularize - using promoted_type = fixed_point_arithmetic::traits::promote_t<T>; - fixed_point<promoted_type> sum(0, src.fixed_point_position(), true); - std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&](T val) - { - const fixed_point<T> res = exp(fixed_point<T>(val, src.fixed_point_position(), true) - max); - sum = add(sum, fixed_point<promoted_type>(res.raw(), src.fixed_point_position(), true)); - return res.raw(); - }); - - // Normalize - fixed_point<T> saturated_sum(sum); - std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [&](T val) - { - return div(fixed_point<T>(val, src.fixed_point_position(), true), saturated_sum).raw(); - }); - } - - return dst; -} - -template <> -SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta) { // Note: Output quantization info should always have scale = 1/256 and offset = 0 const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0); - SimpleTensor<float> src_tmp = convert_from_asymmetric(src); - SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta); - SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, output_quantization_info); + SimpleTensor<float> src_tmp = convert_from_asymmetric(src); + SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta); + SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, output_quantization_info); return dst; } template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta); template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta); +template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h index a6d4c3b8cf..21dca1e52b 100644 --- a/tests/validation/reference/SoftmaxLayer.h +++ b/tests/validation/reference/SoftmaxLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited.
* * SPDX-License-Identifier: MIT * * @@ -38,7 +38,7 @@ namespace reference template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0> SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta); -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> +template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type = 0> SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta); } // namespace reference } // namespace validation diff --git a/tests/validation/reference/Transpose.cpp b/tests/validation/reference/Transpose.cpp index 736f37e4dc..348c7030cb 100644 --- a/tests/validation/reference/Transpose.cpp +++ b/tests/validation/reference/Transpose.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -24,7 +24,6 @@ #include "Transpose.h" #include "arm_compute/core/Types.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp index 5b89934df5..7a5ece8f5e 100644 --- a/tests/validation/reference/WidthConcatenateLayer.cpp +++ b/tests/validation/reference/WidthConcatenateLayer.cpp @@ -23,7 +23,6 @@ */ #include "WidthConcatenateLayer.h" -#include "tests/validation/FixedPoint.h" #include "tests/validation/Helpers.h" namespace arm_compute -- cgit v1.2.1
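For readers tracing the Convolution3d.h hunk above: the patch folds the separate fixed-point and QASYMM8 code paths into one templated overload that operates on asymmetrically quantized uint8 data with an int32 bias. The standalone C++ sketch below illustrates the quantized accumulation that overload performs. It is a simplified model, not arm_compute code: QuantInfo, quantize() and conv_pixel() are helpers invented for this example, a single 3x3 kernel over one channel stands in for the full 3D loop, and border handling is omitted.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Per-tensor asymmetric quantization parameters: real = (q - offset) * scale
struct QuantInfo
{
    float   scale;
    int32_t offset;
};

// Quantize a real value to uint8 under (scale, offset), clamping to [0, 255]
inline uint8_t quantize(float x, QuantInfo qi)
{
    const int32_t q = static_cast<int32_t>(std::lround(x / qi.scale)) + qi.offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

// One output pixel of a 3x3 convolution: subtract the offsets, accumulate the
// products in int32, add the int32 bias (quantized with scale in * scale w),
// then convert the accumulator to a real value and requantize to the output.
uint8_t conv_pixel(const std::vector<uint8_t> &in, int width, int x, int y,
                   const std::vector<uint8_t> &w, int32_t bias,
                   QuantInfo in_qi, QuantInfo w_qi, QuantInfo out_qi)
{
    int32_t acc = 0;
    for(int ky = -1; ky <= 1; ++ky)
    {
        for(int kx = -1; kx <= 1; ++kx)
        {
            const int32_t i_val = static_cast<int32_t>(in[(y + ky) * width + (x + kx)]) - in_qi.offset;
            const int32_t w_val = static_cast<int32_t>(w[(ky + 1) * 3 + (kx + 1)]) - w_qi.offset;
            acc += i_val * w_val;
        }
    }
    acc += bias;
    const float real = static_cast<float>(acc) * in_qi.scale * w_qi.scale;
    return quantize(real, out_qi);
}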
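The SoftmaxLayer.cpp hunk takes a different route: rather than computing softmax in the quantized domain, the new QASYMM8 overload dequantizes to float, reuses the float reference, and requantizes with the fixed output quantization of scale = 1/256 and offset = 0 noted in the code. Below is a minimal sketch of that round-trip strategy, assuming per-tensor quantization parameters; softmax_qasymm8 is a hypothetical helper written for this example, not the library API.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Softmax over one row of QASYMM8 values: dequantize, float softmax, requantize.
std::vector<uint8_t> softmax_qasymm8(const std::vector<uint8_t> &src, float scale, int32_t offset, float beta)
{
    // Dequantize: x = (q - offset) * scale
    std::vector<float> tmp(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        tmp[i] = (static_cast<int32_t>(src[i]) - offset) * scale;
    }

    // Float softmax with max subtraction for numerical stability
    const float max_val = *std::max_element(tmp.begin(), tmp.end());
    float       sum     = 0.f;
    for(float &v : tmp)
    {
        v = std::exp(beta * (v - max_val));
        sum += v;
    }

    // Requantize with the fixed output quantization (scale = 1/256, offset = 0):
    // q = round((v / sum) * 256), clamped to [0, 255]
    std::vector<uint8_t> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        const int32_t q = static_cast<int32_t>(std::lround((tmp[i] / sum) * 256.f));
        dst[i] = static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }
    return dst;
}

Because every softmax output lies in [0, 1] and each row sums to 1, the fixed 1/256 scale uses the full uint8 range regardless of the input's quantization, which is why the reference pins the output QuantizationInfo instead of propagating the input's.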