author    Giorgio Arena <giorgio.arena@arm.com>  2020-10-26 15:04:08 +0000
committer Giorgio Arena <giorgio.arena@arm.com>  2020-11-12 12:42:51 +0000
commit    2d1a835b68eb27a800838fc2b563b12eddf2c19f (patch)
tree      228dee073d37d2ec5b5dfbdb3d0e1e512ecb2d22 /tests
parent    00c7601b1f9c3bec1d3b1db844abb513b9012541 (diff)
download  ComputeLibrary-2d1a835b68eb27a800838fc2b563b12eddf2c19f.tar.gz
COMPMID-3735 Remove OpenCL padding: CLSoftmaxLayerKernel
- Renamed SELECT_DATA_TYPE to SELECT_VEC_DATA_TYPE to reflect its usage with vectors. SELECT_DATA_TYPE(dt) will now return the primitive data type.
- Changed the interface of VEC_OFFS and V_OFFS to receive the primitive data type as a parameter rather than its vector form.
- Performed a general cleanup of the kernels, such as creating macros for the sum and max reductions and removing redundant macros, defines, variables, calculations, etc.
- Used VEC_SIZE and VEC_SIZE_LEFTOVER in every kernel to allow computation on smaller shapes without adding padding.
- Removed the actual padding from the kernel and adjusted its calculations accordingly. Added asserts for the padding-removal checks. Removed invalid Validate tests.

Change-Id: If5ccbd5d34e255d38c7f6bfe8740e2b80b28e264
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4277
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
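As an illustration of the VEC_SIZE / VEC_SIZE_LEFTOVER pattern the message describes, here is a hedged C++ sketch, not the actual ComputeLibrary host code: the host picks a vector width and passes the leftover element count to the kernel as build options, so the kernel can clamp or shift its partial vector accesses and process unpadded tensors. The function name make_vec_build_options and the fixed width of 16 are assumptions made for illustration.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Hedged sketch (not the real ACL helper): derive the -DVEC_SIZE and
    // -DVEC_SIZE_LEFTOVER build options for a kernel that must process a row
    // of `width` elements without any OpenCL padding.
    std::vector<std::string> make_vec_build_options(uint32_t width)
    {
        const uint32_t vec_size = 16U;              // elements handled per work-item (assumed)
        const uint32_t leftover = width % vec_size; // tail elements of the last, partial vector

        return {
            "-DVEC_SIZE=" + std::to_string(vec_size),
            "-DVEC_SIZE_LEFTOVER=" + std::to_string(leftover),
        };
    }

Inside the kernel, the leftover count lets the last work-item keep its loads/stores within the tensor bounds, which is what makes the explicit padding, and the "Window shrink" Validate case removed below, unnecessary.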
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp  10
1 file changed, 1 insertion, 9 deletions
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index fe31b00e00..396e274e0b 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -69,8 +69,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching shapes
TensorInfo(TensorShape(27U, 13U), 1, DataType::QASYMM8, // Invalid output quantization info
QuantizationInfo(1.f/256, 12)),
- TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Window shrink
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),// Invalid input dimensionality
TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 12)),
@@ -85,8 +83,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 12)),
- TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 0)),
@@ -105,22 +101,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
2.0,
1.0,
2.0,
- 1.0,
- 2.0,
})),
framework::dataset::make("axis", {
0,
0,
0,
- 0,
- 0,
1,
0,
-1,
2,
-3,
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, false, true, true, true, false, false })),
input_info, output_info, beta, axis, expected)
{
ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, axis)) == expected, framework::LogLevel::ERRORS);
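For reference, a hedged usage sketch of the same CLSoftmaxLayer::validate call the test exercises; the shapes and values below are illustrative assumptions, not part of this patch. validate() checks a configuration against TensorInfo descriptors without allocating or configuring anything:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    using namespace arm_compute;

    // Hedged sketch: query up front whether a softmax configuration is
    // supported, mirroring the bool(CLSoftmaxLayer::validate(...)) call in the
    // test above.
    bool softmax_config_is_valid()
    {
        // Illustrative 2D tensors; the test dataset builds similar TensorInfo objects.
        const TensorInfo input(TensorShape(32U, 13U), 1, DataType::F32);
        const TensorInfo output(TensorShape(32U, 13U), 1, DataType::F32);

        const float   beta = 1.0f; // exponent scaling factor, as in the "beta" dataset
        const int32_t axis = 0;    // reduction axis, as in the "axis" dataset

        // validate() returns a Status; converting it to bool yields true when
        // the configuration is supported (the "Expected" column in the test).
        return bool(CLSoftmaxLayer::validate(&input, &output, beta, axis));
    }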