author     Sang-Hoon Park <sang-hoon.park@arm.com>          2020-01-08 16:29:15 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>   2020-01-14 15:31:35 +0000
commit     70d33bdfd36e1b44b0573189dca67ed7c63dd59e (patch)
tree       61873b03cdc8eb1cd3544a00d3cef4eefccd5b0f
parent     4715cf9da26c4e914b9528f736e77d6773285169 (diff)
download   ComputeLibrary-70d33bdfd36e1b44b0573189dca67ed7c63dd59e.tar.gz
COMPMID-2755: update CLConvolutionLayer's doxygen and test for QASYMM8_SIGNED
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Change-Id: Ida6ebd2c1ed46d038e13bfbea0306de660dd147b
Reviewed-on: https://review.mlplatform.org/c/2585
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvolutionLayer.h  |  8
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp                | 33
2 files changed, 24 insertions, 17 deletions
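
The patch itself only updates doxygen and the validation dataset, but the intent is that CLConvolutionLayer accepts QASYMM8_SIGNED tensors. The sketch below (not part of this commit) shows what such a configuration might look like; the shapes and PadStrideInfo mirror the first entry of the test dataset, while the quantization scales/offsets and the surrounding boilerplate are illustrative assumptions.

// Hypothetical usage sketch: running CLConvolutionLayer on QASYMM8_SIGNED tensors.
// Shapes and conv_info mirror a dataset entry from the test below; quantization
// parameters are made up for illustration.
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // set up the CL context and queue

    CLTensor input, weights, biases, output;
    // Input [width, height, IFM], weights [kernel_x, kernel_y, IFM, OFM], biases [OFM]
    input.allocator()->init(TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10)));
    weights.allocator()->init(TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 0)));
    biases.allocator()->init(TensorInfo(TensorShape(19U), 1, DataType::S32)); // biases are S32 for quantized inputs
    output.allocator()->init(TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.0f, 0)));

    CLConvolutionLayer conv;
    conv.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 2, 1, 1));

    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input/weights/biases here (e.g. via map()/unmap()), then run.
    conv.run();
    CLScheduler::get().sync();
    return 0;
}
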
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 66dc7af003..b52695463a 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -77,7 +77,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QASYMM8/F16/F32.
+ * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
@@ -98,7 +98,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QASYMM8/F16/F32.
+ * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
@@ -121,7 +121,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QASYMM8/F16/F32.
+ * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* Data type supported: Same as @p input or QASYMM8/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
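
The doxygen hunks above cover configure() and validate() alike, so a caller can ask validate() up front whether a QASYMM8_SIGNED setup is supported before allocating anything. A minimal sketch under the same assumed shapes and quantization parameters as before:

// Hypothetical pre-flight check: query CLConvolutionLayer::validate() with
// QASYMM8_SIGNED TensorInfo objects; no tensors are allocated or configured.
#include <iostream>
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    const TensorInfo input(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10));
    const TensorInfo weights(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 0));
    const TensorInfo biases(TensorShape(19U), 1, DataType::S32);
    const TensorInfo output(TensorShape(15U, 15U, 19U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.0f, 0));

    const Status status = CLConvolutionLayer::validate(&input, &weights, &biases, &output, PadStrideInfo(1, 2, 1, 1));
    if(!bool(status))
    {
        std::cout << "Unsupported: " << status.error_description() << std::endl;
        return 1;
    }
    std::cout << "QASYMM8_SIGNED convolution configuration is supported" << std::endl;
    return 0;
}
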
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 130af572ff..0d8a322694 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -87,14 +87,15 @@ TEST_SUITE(ConvolutionLayer)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
- TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
- TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Select GEMM
- TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
- TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), // Select GEMM
- TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32), // Select GEMM
- TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32), // Select WINOGRAD
- TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32) // Select GEMM
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
+ TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32), // Select WINOGRAD
+ TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
+ TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED), // Select GEMM
}),
framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
@@ -103,7 +104,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
- TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32)
+ TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED),
})),
framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
@@ -112,7 +114,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
- TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32)
+ TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
+ TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED),
})),
framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
PadStrideInfo(1, 2, 1, 1),
@@ -121,7 +124,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
PadStrideInfo(2, 1, 0, 0),
PadStrideInfo(3, 2, 1, 0),
PadStrideInfo(1, 1, 2, 2),
- PadStrideInfo(1, 1, 2, 2)
+ PadStrideInfo(1, 1, 2, 2),
+ PadStrideInfo(1, 1, 2, 2),
})),
framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
GPUTarget::MIDGARD,
@@ -130,7 +134,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
GPUTarget::MIDGARD,
GPUTarget::BIFROST,
GPUTarget::BIFROST,
- GPUTarget::BIFROST
+ GPUTarget::BIFROST,
+ GPUTarget::BIFROST,
})),
framework::dataset::make("Dilation", { Size2D(1U, 1U),
Size2D(1U, 1U),
@@ -140,8 +145,9 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
Size2D(1U, 1U),
Size2D(1U, 1U),
Size2D(2U, 1U),
+ Size2D(2U, 1U),
})),
- framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true })),
+ framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
framework::dataset::make("Expected",{ ConvolutionMethod::GEMM,
ConvolutionMethod::GEMM,
ConvolutionMethod::GEMM,
@@ -150,6 +156,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
ConvolutionMethod::GEMM,
ConvolutionMethod::WINOGRAD,
ConvolutionMethod::GEMM,
+ ConvolutionMethod::GEMM,
})),
input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{