Diffstat (limited to 'tests/validation/fixtures')
 tests/validation/fixtures/ActivationLayerFixture.h | 6
 tests/validation/fixtures/AddMulAddFixture.h | 19
 tests/validation/fixtures/ArgMinMaxFixture.h | 14
 tests/validation/fixtures/ArithmeticOperationsFixture.h | 14
 tests/validation/fixtures/BatchNormalizationLayerFixture.h | 15
 tests/validation/fixtures/BoundingBoxTransformFixture.h | 14
 tests/validation/fixtures/CastFixture.h | 14
 tests/validation/fixtures/ChannelShuffleLayerFixture.h | 14
 tests/validation/fixtures/ComparisonFixture.h | 14
 tests/validation/fixtures/ComputeAllAnchorsFixture.h | 14
 tests/validation/fixtures/ConcatenateLayerFixture.h | 14
 tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h | 14
 tests/validation/fixtures/ConvolutionLayerFixture.h | 193
 tests/validation/fixtures/CpuActivationFixture.h | 200
 tests/validation/fixtures/CpuArithmeticOperationsFixture.h | 208
 tests/validation/fixtures/CpuDepthwiseConv2dFixture.h | 905
 tests/validation/fixtures/CpuElementwiseFixture.h | 233
 tests/validation/fixtures/CpuGemmAssemblyDispatchFixture.h | 238
 tests/validation/fixtures/CpuGemmConv2dFixture.h | 165
 tests/validation/fixtures/CpuGemmDirectConv2dFixture.h | 166
 tests/validation/fixtures/CpuMulFixture.h | 207
 tests/validation/fixtures/CpuWinogradConv2dFixture.h | 211
 tests/validation/fixtures/CropResizeFixture.h | 14
 tests/validation/fixtures/DeconvolutionLayerFixture.h | 38
 tests/validation/fixtures/DepthConvertLayerFixture.h | 14
 tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h | 10
 tests/validation/fixtures/DequantizationLayerFixture.h | 14
 tests/validation/fixtures/DirectConvolution3DFixture.h | 6
 tests/validation/fixtures/DirectConvolutionLayerFixture.h | 14
 tests/validation/fixtures/ElementwiseOperationsFixture.h | 9
 tests/validation/fixtures/ElementwiseUnaryFixture.h | 14
 tests/validation/fixtures/FlattenLayerFixture.h | 14
 tests/validation/fixtures/FloorFixture.h | 14
 tests/validation/fixtures/FullyConnectedLayerFixture.h | 12
 tests/validation/fixtures/FuseBatchNormalizationFixture.h | 14
 tests/validation/fixtures/GEMMFixture.h | 6
 tests/validation/fixtures/GEMMLowpFixture.h | 165
 tests/validation/fixtures/Im2ColFixture.h | 14
 tests/validation/fixtures/InstanceNormalizationLayerFixture.h | 14
 tests/validation/fixtures/L2NormalizeLayerFixture.h | 14
 tests/validation/fixtures/LSTMLayerFixture.h | 57
 tests/validation/fixtures/MatMulFixture.h | 6
 tests/validation/fixtures/MaxUnpoolingLayerFixture.h | 14
 tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h | 14
 tests/validation/fixtures/NormalizationLayerFixture.h | 14
 tests/validation/fixtures/PadLayerFixture.h | 14
 tests/validation/fixtures/PixelWiseMultiplicationFixture.h | 15
 tests/validation/fixtures/Pooling3dLayerFixture.h | 14
 tests/validation/fixtures/PoolingLayerFixture.h | 14
 tests/validation/fixtures/QuantizationLayerFixture.h | 14
 tests/validation/fixtures/RNNLayerFixture.h | 14
 tests/validation/fixtures/ROIAlignLayerFixture.h | 14
 tests/validation/fixtures/RangeFixture.h | 14
 tests/validation/fixtures/ReduceMeanFixture.h | 14
 tests/validation/fixtures/ReductionOperationFixture.h | 14
 tests/validation/fixtures/ReverseFixture.h | 8
 tests/validation/fixtures/ScaleFixture.h | 8
 tests/validation/fixtures/ScatterLayerFixture.h | 1
 tests/validation/fixtures/SelectFixture.h | 14
 tests/validation/fixtures/SliceOperationsFixtures.h | 20
 tests/validation/fixtures/SoftmaxLayerFixture.h | 14
 tests/validation/fixtures/SplitFixture.h | 20
 tests/validation/fixtures/UnstackFixture.h | 14
 tests/validation/fixtures/WinogradConvolutionLayerFixture.h | 24
 tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h | 1
 65 files changed, 3471 insertions(+), 191 deletions(-)
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index a24ba8913e..d3e8bf09f2 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -50,6 +50,12 @@ public:
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
_in_place = in_place;
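
The same early-return guard recurs in nearly every fixture touched by this patch. In isolation, the pattern is (a minimal sketch using the ACL types referenced above; data_type stands for whichever setup() parameter carries the type under test):

    // Skip F16 cases when the fixture is instantiated for the Cpu backend
    // (TensorType == Tensor) but the host CPU lacks FP16 support.
    if(std::is_same<TensorType, Tensor>::value &&
       data_type == DataType::F16 && !CPUInfo::get().has_fp16())
    {
        return; // setup() bails out, so no target/reference is computed or compared
    }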
diff --git a/tests/validation/fixtures/AddMulAddFixture.h b/tests/validation/fixtures/AddMulAddFixture.h
index d13fef2f02..788e1c974f 100644
--- a/tests/validation/fixtures/AddMulAddFixture.h
+++ b/tests/validation/fixtures/AddMulAddFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -144,8 +144,15 @@ public:
void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info)
{
- Parent::setup(shape, data_type, act_info, interm_out);
- compute_reference(shape, data_type, act_info);
+ const bool is_not_cpu = !std::is_same<TensorType, Tensor>::value;
+ const bool is_not_fp16 = data_type != DataType::F16;
+ const bool device_has_fp16 = CPUInfo::get().has_fp16();
+
+ if(is_not_cpu || is_not_fp16 || device_has_fp16)
+ {
+ Parent::setup(shape, data_type, act_info, interm_out);
+ compute_reference(shape, data_type, act_info);
+ }
}
// Compute Reference is moved outside of the generic fixture because with the quantized data types,
@@ -202,6 +209,12 @@ public:
QuantizationInfo input1_qinfo, QuantizationInfo input2_qinfo, QuantizationInfo bn_mul_qinfo,
QuantizationInfo bn_add_qinfo, QuantizationInfo add_output_qinfo, QuantizationInfo final_output_qinfo)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
+ // Quantization arguments moved to class attributes to prevent long function declarations
Parent::_input1_qinfo = input1_qinfo;
Parent::_input2_qinfo = input2_qinfo;
diff --git a/tests/validation/fixtures/ArgMinMaxFixture.h b/tests/validation/fixtures/ArgMinMaxFixture.h
index 7a823568a8..884b19260a 100644
--- a/tests/validation/fixtures/ArgMinMaxFixture.h
+++ b/tests/validation/fixtures/ArgMinMaxFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2023 Arm Limited.
+ * Copyright (c) 2018-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE
-#define ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ARGMINMAXFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ARGMINMAXFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class ArgMinMaxValidationBaseFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType input_type, DataType output_type, int axis, ReductionOperation op, QuantizationInfo q_info)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ input_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, input_type, output_type, axis, op, q_info);
_reference = compute_reference(shape, input_type, output_type, axis, op, q_info);
}
@@ -168,4 +174,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ARGMINMAXFIXTURE_H
diff --git a/tests/validation/fixtures/ArithmeticOperationsFixture.h b/tests/validation/fixtures/ArithmeticOperationsFixture.h
index 0785af1151..112d908a81 100644
--- a/tests/validation/fixtures/ArithmeticOperationsFixture.h
+++ b/tests/validation/fixtures/ArithmeticOperationsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ARITHMETIC_OPERATIONS_FIXTURE
-#define ARM_COMPUTE_TEST_ARITHMETIC_OPERATIONS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ARITHMETICOPERATIONSFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ARITHMETICOPERATIONSFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ public:
void setup(reference::ArithmeticOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, ConvertPolicy convert_policy,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, ActivationLayerInfo act_info, bool is_inplace)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_op = op;
_act_info = act_info;
_is_inplace = is_inplace;
@@ -284,4 +290,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ARITHMETIC_OPERATIONS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ARITHMETICOPERATIONSFIXTURE_H
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index 54a0ed9e09..2374ecf64a 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_BATCHNORMALIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_BATCHNORMALIZATIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,10 +46,15 @@ class BatchNormalizationLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ dt == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_data_type = dt;
_use_beta = use_beta;
_use_gamma = use_gamma;
-
_target = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout);
_reference = compute_reference(shape0, shape1, epsilon, act_info, dt);
}
@@ -165,4 +170,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_BATCHNORMALIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/BoundingBoxTransformFixture.h b/tests/validation/fixtures/BoundingBoxTransformFixture.h
index 03edaeab16..84576335b0 100644
--- a/tests/validation/fixtures/BoundingBoxTransformFixture.h
+++ b/tests/validation/fixtures/BoundingBoxTransformFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_BOUNDINGBOXTRANSFORM_FIXTURE
-#define ARM_COMPUTE_TEST_BOUNDINGBOXTRANSFORM_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_BOUNDINGBOXTRANSFORMFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_BOUNDINGBOXTRANSFORMFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -104,6 +104,12 @@ public:
void setup(TensorShape deltas_shape, const BoundingBoxTransformInfo &info, DataType data_type, QuantizationInfo deltas_qinfo)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const bool is_qasymm16 = data_type == DataType::QASYMM16;
_data_type_deltas = (is_qasymm16) ? DataType::QASYMM8 : data_type;
_boxes_qinfo = (is_qasymm16) ? QuantizationInfo(.125f, 0) : QuantizationInfo();
@@ -234,4 +240,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_BOUNDINGBOXTRANSFORM_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_BOUNDINGBOXTRANSFORMFIXTURE_H
diff --git a/tests/validation/fixtures/CastFixture.h b/tests/validation/fixtures/CastFixture.h
index e9d624e6f3..8297ec81dc 100644
--- a/tests/validation/fixtures/CastFixture.h
+++ b/tests/validation/fixtures/CastFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_CAST_FIXTURE
-#define ARM_COMPUTE_TEST_CAST_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CASTFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CASTFIXTURE_H
#include "tests/validation/fixtures/DepthConvertLayerFixture.h"
@@ -38,6 +38,12 @@ class CastValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (dt_in == DataType::F16 || dt_out == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, dt_in, dt_out, policy);
_reference = compute_reference(shape, dt_in, dt_out, policy);
}
@@ -151,4 +157,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CAST_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CASTFIXTURE_H
diff --git a/tests/validation/fixtures/ChannelShuffleLayerFixture.h b/tests/validation/fixtures/ChannelShuffleLayerFixture.h
index 530dba3893..63dfd62751 100644
--- a/tests/validation/fixtures/ChannelShuffleLayerFixture.h
+++ b/tests/validation/fixtures/ChannelShuffleLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_CHANNEL_SHUFFLE_FIXTURE
-#define ARM_COMPUTE_TEST_CHANNEL_SHUFFLE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CHANNELSHUFFLELAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CHANNELSHUFFLELAYERFIXTURE_H
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
@@ -47,6 +47,12 @@ class ChannelShuffleLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, unsigned int num_groups, DataType data_type, DataLayout data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type, num_groups, data_layout);
_reference = compute_reference(shape, data_type, num_groups);
}
@@ -110,4 +116,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CHANNEL_SHUFFLE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CHANNELSHUFFLELAYERFIXTURE_H
diff --git a/tests/validation/fixtures/ComparisonFixture.h b/tests/validation/fixtures/ComparisonFixture.h
index f25d5abb73..b7c94e1c8a 100644
--- a/tests/validation/fixtures/ComparisonFixture.h
+++ b/tests/validation/fixtures/ComparisonFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_COMPARISON_FIXTURE
-#define ARM_COMPUTE_TEST_COMPARISON_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_COMPARISONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_COMPARISONFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class ComparisonValidationGenericFixture : public framework::Fixture
public:
void setup(ComparisonOperation op, const TensorShape &shape0, const TensorShape &shape1, DataType data_type, QuantizationInfo qinfo0, QuantizationInfo qinfo1)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(op, shape0, shape1, data_type, qinfo0, qinfo1);
_reference = compute_reference(op, shape0, shape1, data_type, qinfo0, qinfo1);
}
@@ -155,4 +161,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_COMPARISON_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_COMPARISONFIXTURE_H
diff --git a/tests/validation/fixtures/ComputeAllAnchorsFixture.h b/tests/validation/fixtures/ComputeAllAnchorsFixture.h
index 620f1b53fa..a0e712e567 100644
--- a/tests/validation/fixtures/ComputeAllAnchorsFixture.h
+++ b/tests/validation/fixtures/ComputeAllAnchorsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_COMPUTEALLANCHORS_FIXTURE
-#define ARM_COMPUTE_TEST_COMPUTEALLANCHORS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_COMPUTEALLANCHORSFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_COMPUTEALLANCHORSFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class ComputeAllAnchorsGenericFixture : public framework::Fixture
public:
void setup(size_t num_anchors, const ComputeAnchorsInfo &info, DataType data_type, QuantizationInfo qinfo)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(num_anchors, data_type, info, qinfo);
_reference = compute_reference(num_anchors, data_type, info, qinfo);
}
@@ -124,4 +130,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_COMPUTEALLANCHORS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_COMPUTEALLANCHORSFIXTURE_H
diff --git a/tests/validation/fixtures/ConcatenateLayerFixture.h b/tests/validation/fixtures/ConcatenateLayerFixture.h
index 3a021661ac..dab055b7b9 100644
--- a/tests/validation/fixtures/ConcatenateLayerFixture.h
+++ b/tests/validation/fixtures/ConcatenateLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONCATENATELAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CONCATENATELAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -52,6 +52,12 @@ private:
public:
void setup(TensorShape shape, DataType data_type, unsigned int axis)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
// Create input shapes
std::mt19937 gen(library->seed());
std::uniform_int_distribution<> num_dis(2, 8);
@@ -170,4 +176,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CONCATENATELAYERFIXTURE_H
diff --git a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
index 7ad14e1b40..5e2f9a9c3d 100644
--- a/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
+++ b/tests/validation/fixtures/ConvertFullyConnectedWeightsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2023 Arm Limited.
+ * Copyright (c) 2018-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_CONVERT_FULLY_CONNECTED_WEIGHTS_FIXTURE
-#define ARM_COMPUTE_TEST_CONVERT_FULLY_CONNECTED_WEIGHTS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CONVERTFULLYCONNECTEDWEIGHTSFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CONVERTFULLYCONNECTEDWEIGHTSFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -45,6 +45,12 @@ class ConvertFullyConnectedWeightsValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, unsigned int weights_w, DataLayout training_data_layout, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const unsigned int height = input_shape.x() * input_shape.y() * input_shape.z();
const TensorShape weights_shape(weights_w, height);
@@ -128,4 +134,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_CONVERT_FULLY_CONNECTED_WEIGHTS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CONVERTFULLYCONNECTEDWEIGHTSFIXTURE_H
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 2a317e9b9b..780ae7a522 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -32,6 +32,7 @@
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#endif // ARM_COMPUTE_OPENCL_ENABLED
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
#include "src/graph/mutators/MutatorUtils.h"
#include "tests/AssetsLibrary.h"
@@ -123,8 +124,14 @@ public:
public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
- bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
+ bool mixed_layout = false, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false, bool updated_sq_info_after_config = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
// This hash is used by random generators. There may be hash collisions but
// this is intentional as it's a very easy way to make the current
// random generation process differ across many test configurations,
@@ -151,7 +158,15 @@ public:
_use_dynamic_output_quant = true;
}
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights);
+ if (updated_sq_info_after_config)
+ {
+ _target = compute_gemmlowp_target_for_updated_sq_info_after_config(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights);
+ }
+ else
+ {
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info, pre_pad_layer, padded_weights);
+ }
+
_reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info, pre_pad_layer);
}
@@ -204,7 +219,10 @@ protected:
{
if(_use_dynamic_output_quant)
{
- std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ // Using -127 as the lower bound because of possible overflow.
+ // This is a known issue and reported in the errata.
+ // See COMPMID-7109 for more details
+ std::uniform_int_distribution<int32_t> distribution(-127, 127);
library->fill(tensor, distribution, i);
}
else
@@ -298,7 +316,6 @@ protected:
WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
TensorShape reshaped_weights_shape(weights_shape);
- // Create tensors
TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout);
@@ -372,6 +389,124 @@ protected:
return dst;
}
+ // Compute the target when updating static quantization information after configuration.
+ TensorType compute_gemmlowp_target_for_updated_sq_info_after_config(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info,
+ bool reshape_weights, const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}), bool padded_weights = false)
+ {
+ ARM_COMPUTE_ASSERT((std::is_same<FunctionType, NEGEMMConvolutionLayer>::value == true));
+ ARM_COMPUTE_ERROR_ON((input_shape[2] % weights_shape[2]) != 0);
+
+ const unsigned int num_groups = input_shape[2] / weights_shape[2];
+
+ if(_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ if(pre_pad_layer.size() > 0)
+ {
+ // make sure padding info exists for each of the c, h, w dimensions
+ for(unsigned int i = 0; i < 3 - pre_pad_layer.size(); ++i)
+ {
+ pre_pad_layer.push_back({ 0, 0 });
+ }
+
+ // rotate padding info from nchw to nhwc
+ std::rotate(pre_pad_layer.begin(), pre_pad_layer.begin() + 2, pre_pad_layer.begin() + 3);
+ }
+ }
+
+ const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+
+ WeightsInfo weights_info(!reshape_weights, weights_shape[idx_width], weights_shape[idx_height], weights_shape[3]);
+ TensorShape reshaped_weights_shape(weights_shape);
+
+ // Create tensors with fake quantization info and defer passing the correct ones to a later stage.
+ auto qi = QuantizationInfo(0.550721, 37, true);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, qi, _data_layout);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, qi, _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _output_data_type, 1, qi, _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, QuantizationInfo() /*bias is not a quantized type*/, _data_layout);
+
+ // Create and configure function
+ FunctionType conv;
+
+ const unsigned int height_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int width_index = arm_compute::graph::get_dimension_idx(_data_layout, DataLayoutDimension::WIDTH);
+
+ const PaddingInfo pad_w = width_index < pre_pad_layer.size() ? pre_pad_layer[width_index] : PaddingInfo(0, 0);
+ const PaddingInfo pad_h = height_index < pre_pad_layer.size() ? pre_pad_layer[height_index] : PaddingInfo(0, 0);
+
+ if(pre_pad_layer.size() > 0 && arm_compute::graph::is_padding_in_height_or_width(_data_layout, pre_pad_layer))
+ {
+ // this is the logic implemented in NodeFusionMutator -> fuse_pad_with_convolution
+ const PadStrideInfo new_conv_info(
+ info.stride().first,
+ info.stride().second,
+ info.pad_left() + pad_w.first,
+ info.pad_right() + pad_w.second,
+ info.pad_top() + pad_h.first,
+ info.pad_bottom() + pad_h.second,
+ info.round());
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, new_conv_info, weights_info, dilation, act_info, num_groups);
+ }
+ else
+ {
+ detail::configure_conv_function(conv, &src, &weights, &bias, &dst, info, weights_info, dilation, act_info, num_groups);
+ }
+
+ // After calling configure, set the correct quantization info on the tensors and update ACL.
+ src.info()->set_quantization_info(QuantizationInfo(_quantization_info.scale(), _quantization_info.offset(), true));
+ weights.info()->set_quantization_info(QuantizationInfo(_weight_quantization_info.scale(), _weight_quantization_info.offset(), true));
+ dst.info()->set_quantization_info(QuantizationInfo(_dst_q_info.scale(), _dst_q_info.offset(), true));
+
+ // propagate the correct quantization info through ACL
+ NEGEMMConvolutionLayer *lp = reinterpret_cast<NEGEMMConvolutionLayer *>(&conv);
+ lp->update_quantization_parameters();
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ // Test "add padding after configure" behavior. This behavior should not affect the correctness
+ add_padding_x({ &src, &bias, &dst }, _data_layout);
+ // Padding weights may affect code path in some backends
+ if (padded_weights)
+ {
+ add_padding_x({ &weights }, _data_layout);
+ }
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ bias.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(bias), 2 + _hash);
+
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
+
+ return dst;
+ }
+
SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
const Size2D &dilation, const ActivationLayerInfo act_info, PaddingList pre_pad_layer = PaddingList({}))
{
@@ -479,6 +614,44 @@ public:
}
};
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
+class ConvolutionValidationForUpdatedStaticQuantInfoAfterConfigureFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+ DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+ data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout,
+ PaddingList({}), false, true);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
+class ConvolutionValidationQuantizedMixedTypeFixture
+ : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
+{
+public:
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ PadStrideInfo info,
+ Size2D dilation,
+ bool reshape_weights,
+ DataType data_type,
+ DataType weights_data_type,
+ DataLayout data_layout,
+ QuantizationInfo quantization_info,
+ QuantizationInfo weight_quantization_info,
+ ActivationLayerInfo act_info)
+ {
+ ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(
+ input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type,
+ weights_data_type, data_layout, quantization_info, weight_quantization_info, act_info);
+ }
+};
+
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
{
@@ -597,6 +770,12 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, DataLayout data_layout,
const DataType data_type)
{
+ if(std::is_same<TensorClass, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
conv = std::make_unique<ConvolutionFunction>();
// prepare data
_data_layout = data_layout;
@@ -783,6 +962,12 @@ class HasOptImplFixture : public framework::Fixture
public:
void setup(DataType data_type, arm_compute::WeightFormat query_weight_format)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
auto conv = std::make_unique<ConvolutionClass>();
const auto src_info = TensorInfo(TensorShape(56U, 56U, 64U), 1, data_type, DataLayout::NHWC);
const auto weight_info = TensorInfo(TensorShape(64, 3U, 3U, 64U), 1, enable_fast_math ? DataType::BFLOAT16 : data_type, DataLayout::NHWC);
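
Condensed, the new compute_gemmlowp_target_for_updated_sq_info_after_config() path above validates the following call sequence (a sketch reusing the calls from the hunk; src_qinfo, weights_qinfo and dst_qinfo are placeholders for the fixture's stored quantization info):

    // 1) Configure with placeholder ("fake") quantization info on the tensors.
    detail::configure_conv_function(conv, &src, &weights, &bias, &dst,
                                    info, weights_info, dilation, act_info, num_groups);

    // 2) Replace it with the real static quantization info after configure().
    src.info()->set_quantization_info(src_qinfo);
    weights.info()->set_quantization_info(weights_qinfo);
    dst.info()->set_quantization_info(dst_qinfo);

    // 3) Ask the function to re-derive its quantized GEMM parameters.
    reinterpret_cast<NEGEMMConvolutionLayer *>(&conv)->update_quantization_parameters();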
diff --git a/tests/validation/fixtures/CpuActivationFixture.h b/tests/validation/fixtures/CpuActivationFixture.h
new file mode 100644
index 0000000000..9e05db969a
--- /dev/null
+++ b/tests/validation/fixtures/CpuActivationFixture.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUACTIVATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUACTIVATIONFIXTURE_H
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuActivationValidationGenericFixture : public framework::Fixture
+{
+public:
+
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
+ {
+ ActivationLayerInfo info(function, alpha_beta, alpha_beta);
+
+ _in_place = in_place;
+ _data_type = data_type;
+ // We only test the fp32 data type for the CpuActivation wrapper, so
+ // we can ignore quantization_info here and just use the default one.
+ _output_quantization_info = quantization_info;
+ _input_quantization_info = quantization_info;
+
+ _function = function;
+ _target = compute_target(shape, info);
+ _reference = compute_reference(shape, info);
+ }
+
+protected:
+ std::vector<T> get_boundary_values(T min, T max)
+ {
+ // This function returns a vector filled with the following values, which
+ // represent two partitions derived from equivalence partitioning:
+ // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
+ // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
+ const auto delta = is_data_type_float(_data_type) ? T(0.1f) : T(1);
+ const auto center_value = (min + max) / 2;
+ const auto lower_quarter = (min + center_value) / 2;
+ const auto upper_quarter = (center_value + max) / 2;
+
+ std::vector<T> boundary_values{};
+
+ // To ensure all the inserted values are within the given range after subtracting/adding delta
+ auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
+ {
+ for(auto &v : new_values)
+ {
+ if(v >= min && v <= max)
+ {
+ boundary_values.emplace_back(v);
+ }
+ }
+ };
+
+ insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition
+ insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition
+
+ return boundary_values;
+ }
+
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if(is_data_type_float(_data_type))
+ {
+ float min_bound = 0;
+ float max_bound = 0;
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
+ library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
+ }
+ else
+ {
+ PixelValue min{};
+ PixelValue max{};
+ std::tie(min, max) = get_min_max(tensor.data_type());
+ library->fill_static_values(tensor, get_boundary_values(min.get<T>(), max.get<T>()));
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW);
+ TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW);
+
+ // Create and configure function
+ FunctionType act_layer;
+
+ TensorType *dst_ptr = _in_place ? &src : &dst;
+
+ if(!_in_place)
+ {
+ act_layer.configure(src.info(), dst.info(), info);
+ }
+ else
+ {
+ act_layer.configure(src.info(), nullptr, info);
+ }
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+
+ if(!_in_place)
+ {
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ ITensorPack run_pack{ { arm_compute::TensorType::ACL_SRC, &src }, { arm_compute::TensorType::ACL_DST, dst_ptr } };
+ act_layer.run(run_pack);
+
+ if(_in_place)
+ {
+ return src;
+ }
+ else
+ {
+ return dst;
+ }
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };
+
+ // Fill reference
+ fill(src);
+
+ return reference::activation_layer<T>(src, info, _output_quantization_info);
+ }
+
+protected:
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ bool _in_place{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
+ DataType _data_type{};
+ ActivationLayerInfo::ActivationFunction _function{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuActivationValidationFixture : public CpuActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
+ {
+ CpuActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUACTIVATIONFIXTURE_H
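
Unlike the runtime-function fixtures above, this new fixture drives the operator-level API: configure() takes ITensorInfo pointers only, and run() takes an ITensorPack that binds the concrete tensors. A minimal sketch of that calling convention, mirroring compute_target() above (FunctionType is the experimental CPU operator under test):

    FunctionType op;
    op.configure(src.info(), dst.info(), info); // only tensor metadata at configure time

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Bind the concrete tensors at run time via a pack.
    ITensorPack pack{ { arm_compute::TensorType::ACL_SRC, &src },
                      { arm_compute::TensorType::ACL_DST, &dst } };
    op.run(pack);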
diff --git a/tests/validation/fixtures/CpuArithmeticOperationsFixture.h b/tests/validation/fixtures/CpuArithmeticOperationsFixture.h
new file mode 100644
index 0000000000..6abfe5803e
--- /dev/null
+++ b/tests/validation/fixtures/CpuArithmeticOperationsFixture.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUARITHMETICOPERATIONSFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUARITHMETICOPERATIONSFIXTURE_H
+
+#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/TensorShape.h"
+
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ArithmeticOperations.h"
+
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuArithmeticOperationGenericFixture : public framework::Fixture
+{
+public:
+ void setup(reference::ArithmeticOperation op,
+ const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type,
+ ConvertPolicy convert_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out,
+ ActivationLayerInfo act_info,
+ bool is_inplace)
+ {
+ if (std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
+ _op = op;
+ _act_info = act_info;
+ _is_inplace = is_inplace;
+ _target = compute_target(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ _reference = compute_reference(shape0, shape1, data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type,
+ ConvertPolicy convert_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out)
+ {
+ // Create tensors
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type, 1, qinfo0);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, data_type, 1, qinfo_out);
+
+ // Check whether to do in-place computation and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if (_is_inplace)
+ {
+ bool src1_is_inplace =
+ !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out);
+ bool src2_is_inplace =
+ !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if (src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
+
+ // Create and configure function
+ FunctionType arith_op;
+ arith_op.configure(ref_src1.info(), ref_src2.info(), actual_dst->info(), convert_policy, _act_info);
+
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
+
+ // Allocate tensors
+ ref_src1.allocator()->allocate();
+ ref_src2.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
+
+ // If not computing in-place, we still need to allocate the original dst
+ if (!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ // Fill tensors
+ fill(AccessorType(ref_src1), 0);
+ fill(AccessorType(ref_src2), 1);
+
+ // Compute function
+ ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &ref_src1},
+ {arm_compute::TensorType::ACL_SRC_1, &ref_src2},
+ {arm_compute::TensorType::ACL_DST, &dst}};
+ arith_op.run(run_pack);
+
+ return std::move(*actual_dst);
+ }
+
+ SimpleTensor<uint8_t> compute_reference(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type,
+ ConvertPolicy convert_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> ref_src1{shape0, data_type, 1, qinfo0};
+ SimpleTensor<uint8_t> ref_src2{shape1, data_type, 1, qinfo1};
+ SimpleTensor<uint8_t> ref_dst{TensorShape::broadcast_shape(shape0, shape1), data_type, 1, qinfo_out};
+
+ // Fill reference
+ fill(ref_src1, 0);
+ fill(ref_src2, 1);
+
+ auto result = reference::arithmetic_operation<uint8_t>(_op, ref_src1, ref_src2, ref_dst, convert_policy);
+ return _act_info.enabled() ? reference::activation_layer(result, _act_info, qinfo_out) : result;
+ }
+
+ TensorType _target{};
+ SimpleTensor<uint8_t> _reference{};
+ reference::ArithmeticOperation _op{reference::ArithmeticOperation::ADD};
+ ActivationLayerInfo _act_info{};
+ bool _is_inplace{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuArithmeticAdditionValidationFixture
+ : public CpuArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
+ {
+ CpuArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType>::setup(
+ reference::ArithmeticOperation::ADD, shape, shape, data_type, convert_policy, QuantizationInfo(),
+ QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuArithmeticSubtractionValidationFixture
+ : public CpuArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType>
+{
+public:
+ void setup(const TensorShape &shape, DataType data_type, ConvertPolicy convert_policy, bool is_inplace)
+ {
+ CpuArithmeticOperationGenericFixture<TensorType, AccessorType, FunctionType>::setup(
+ reference::ArithmeticOperation::SUB, shape, shape, data_type, convert_policy, QuantizationInfo(),
+ QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo(), is_inplace);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUARITHMETICOPERATIONSFIXTURE_H
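
For reference, the in-place handling in compute_target() above reduces to aliasing the destination onto whichever input already matches the output (a sketch of the same conditions, using the same helper as the hunk):

    // In-place is only valid when an input already has the broadcast output
    // shape and the same quantization info as the output.
    const bool src1_ok = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) && (qinfo0 == qinfo_out);
    const bool src2_ok = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) && (qinfo1 == qinfo_out);
    ARM_COMPUTE_ASSERT(src1_ok || src2_ok); // the fixture requires a valid in-place candidate
    TensorType *actual_dst = src1_ok ? &ref_src1 : &ref_src2;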
diff --git a/tests/validation/fixtures/CpuDepthwiseConv2dFixture.h b/tests/validation/fixtures/CpuDepthwiseConv2dFixture.h
new file mode 100644
index 0000000000..1197687358
--- /dev/null
+++ b/tests/validation/fixtures/CpuDepthwiseConv2dFixture.h
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2017-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUDEPTHWISECONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUDEPTHWISECONV2DFIXTURE_H
+
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
+#include "utils/Utils.h"
+
+#include "src/core/helpers/MemoryHelpers.h"
+#include <cstdint>
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
+class CpuDepthwiseConv2dValidationGenericFixture : public framework::Fixture
+{
+public:
+ using TBias =
+ typename std::conditional<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T>::type;
+
+ void setup_quantization(TensorShape input_shape,
+ TensorShape weights_shape,
+ QuantizationInfo &input_q_info,
+ QuantizationInfo &weights_q_info,
+ DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(input_shape);
+ const int32_t t_max = static_cast<int32_t>(std::numeric_limits<T>::max());
+ const int32_t t_min = static_cast<int32_t>(std::numeric_limits<T>::min());
+
+ std::mt19937 generator(library->seed() + _hash);
+ std::uniform_real_distribution<float> distribution_float(-5.0f, 3.0f);
+ std::uniform_int_distribution<int32_t> distribution_t(t_min, t_max);
+
+ const float scale_lhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+ const float scale_rhs = pow(2, distribution_float(generator)); // [2^-5, 2^3]
+
+ const int32_t offset_lhs = distribution_t(generator);
+ const int32_t offset_rhs = distribution_t(generator);
+
+ _input_quantization_info = QuantizationInfo(scale_lhs, offset_lhs);
+ _weights_quantization_info = QuantizationInfo(scale_rhs, offset_rhs);
+
+ QuantizationHint q_hint = suggest_conv_dst_q_info_and_bias(
+ input_q_info, weights_q_info, weights_shape.y() /* height */, weights_shape.x() /* width */,
+ 1 /* channels */, data_type, 0.5f /* bias_fraction */);
+
+ _output_quantization_info = q_hint.q_info;
+ _min_bias = q_hint.bias_min;
+ _max_bias = q_hint.bias_max;
+ }
+
+public:
+ void setup(TensorShape in_shape,
+ Size2D kernel_size,
+ PadStrideInfo pad_stride_info,
+ Size2D dilation,
+ unsigned int depth_multiplier,
+ DataType input_data_type,
+ DataType weights_data_type,
+ QuantizationInfo input_quantization_info,
+ QuantizationInfo weights_quantization_info,
+ QuantizationInfo output_quantization_info,
+ DataLayout data_layout,
+ ActivationLayerInfo act_info,
+ bool mixed_layout = false,
+ bool in_place = false,
+ bool run_twice = false)
+ {
+ ARM_COMPUTE_ERROR_ON(mixed_layout && in_place);
+
+ _skip_test = false;
+ if (std::is_same<TensorType, Tensor>::value && // Cpu
+ (input_data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ _skip_test = true;
+ return;
+ }
+
+ // This hash is used by random generators. There may be hash collisions but
+ // this is intentional as it's a very easy way to make the current
+ // random generation process differ across many test configurations,
+ // which were using the same set of values before.
+ _hash = in_shape[0] + in_shape[1] + in_shape[2] + in_shape[3] + kernel_size.width + kernel_size.height +
+ dilation.x() + dilation.y() + pad_stride_info.pad_bottom() + pad_stride_info.pad_left() +
+ pad_stride_info.pad_right() + pad_stride_info.pad_top();
+
+ _mixed_layout = mixed_layout;
+ _input_shape = in_shape;
+ _input_data_type = input_data_type;
+ _weights_data_type = weights_data_type;
+ _data_layout = data_layout;
+ _pad_stride_info = pad_stride_info;
+ _act_info = act_info;
+ _depth_multiplier = depth_multiplier;
+ _dilation = dilation;
+ _in_place = in_place;
+ _run_twice = run_twice;
+
+ _bias_data_type = is_data_type_quantized(_input_data_type) ? DataType::S32 : _input_data_type;
+
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height);
+
+ const TensorInfo in_info(_input_shape, 1, _input_data_type);
+ const TensorInfo we_info(_weights_shape, 1, _weights_data_type);
+ const ConvolutionInfo info{_pad_stride_info, _depth_multiplier, _act_info, _dilation};
+ _output_shape = compute_depthwise_convolution_shape(in_info, we_info, info);
+
+ _weights_shape.set(2, _output_shape.z());
+ _biases_shape = TensorShape(_weights_shape[2]);
+
+ _input_quantization_info = input_quantization_info;
+ _weights_quantization_info = weights_quantization_info;
+ _output_quantization_info = output_quantization_info;
+
+ if (is_data_type_quantized(_input_data_type) && !is_data_type_quantized_symmetric(weights_data_type) &&
+ (!act_info.enabled() || act_info.activation() == ActivationFunction::IDENTITY))
+ {
+ setup_quantization(in_shape, _weights_shape, _input_quantization_info, _weights_quantization_info,
+ _input_data_type);
+ _use_dynamic_output_quant = true;
+ }
+ }
+
+ void configure_target()
+ {
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
+ TensorShape output_shape = _output_shape;
+
+ if (_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ _src = create_tensor<TensorType>(input_shape, _input_data_type, 1, _input_quantization_info, _data_layout);
+ _weights =
+ create_tensor<TensorType>(weights_shape, _weights_data_type, 1, _weights_quantization_info, _data_layout);
+ if (_run_twice)
+ {
+ _weights.info()->set_are_values_constant(false);
+ }
+ _biases = create_tensor<TensorType>(_biases_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
+ TensorType *target_to_use = nullptr;
+ if (!_in_place)
+ {
+ _target =
+ create_tensor<TensorType>(output_shape, _input_data_type, 1, _output_quantization_info, _data_layout);
+ target_to_use = &_target;
+ }
+
+ add_padding_x({&_src, &_biases}, _data_layout);
+ add_padding_x({&_weights}, _data_layout, true);
+ if (!_in_place)
+ {
+ add_padding_x({&_target}, _data_layout);
+ }
+
+ // Create Depthwise Convolution configure function
+        _dwc.configure(_src.info(), _weights.info(), _biases.info(),
+                       _in_place ? _src.info() : target_to_use->info(), _pad_stride_info, _depth_multiplier,
+                       _act_info, _dilation);
+
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
+
+ void allocate_and_run_target()
+ {
+ // Allocate tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+
+ ITensorPack pack;
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_0, &_src);
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights);
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases);
+        pack.add_tensor(arm_compute::TensorType::ACL_DST, _in_place ? &_src : &_target);
+
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(_dwc.workspace(), mg, pack, pack);
+
+ _target.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(_src), 0 + _hash);
+ fill(AccessorType(_weights), 1 + _hash);
+ fill(AccessorType(_biases), 2 + _hash);
+
+        // When running twice, do a first run and then refill the inputs with new values
+ if (_run_twice)
+ {
+ _dwc.run(pack);
+
+ // Fill tensors with a new seed
+ fill(AccessorType(_src), 3 + _hash);
+ fill(AccessorType(_weights), 4 + _hash);
+ fill(AccessorType(_biases), 5 + _hash);
+ }
+
+ if (_mixed_layout)
+ {
+ mix_layout(_dwc, _src, _target);
+ }
+ else
+ {
+ // Compute function
+ _dwc.run(pack);
+ }
+ }
+
+ void compute_reference()
+ {
+ SimpleTensor<T> src{_input_shape, _input_data_type, 1, _input_quantization_info};
+ SimpleTensor<TW> weights{_weights_shape, _weights_data_type, 1, _weights_quantization_info};
+ SimpleTensor<TBias> biases{_biases_shape, _bias_data_type, 1, _input_quantization_info};
+
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(biases, 2 + _hash);
+
+ if (_run_twice)
+ {
+ SimpleTensor<T> depth_out =
+ reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info,
+ _depth_multiplier, _dilation, _output_quantization_info);
+ if (_act_info.enabled())
+ {
+ reference::activation_layer<T>(depth_out, _act_info);
+ }
+
+ fill(src, 3 + _hash);
+ fill(weights, 4 + _hash);
+ fill(biases, 5 + _hash);
+ }
+
+ SimpleTensor<T> depth_out =
+ reference::depthwise_convolution(src, weights, biases, _output_shape, _pad_stride_info, _depth_multiplier,
+ _dilation, _output_quantization_info);
+ _reference = (_act_info.enabled()) ? reference::activation_layer<T>(depth_out, _act_info) : depth_out;
+ }
+
+protected:
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ ARM_COMPUTE_ERROR_ON(_in_place);
+        // Test multi data layout graph cases where the data layout changes after configure
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ ITensorPack pack;
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_0, &_src);
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights);
+ pack.add_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases);
+ pack.add_tensor(arm_compute::TensorType::ACL_DST, &_target);
+
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(_dwc.workspace(), mg, pack, pack);
+
+ // Compute Convolution function
+ layer.run(pack);
+
+        // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::QASYMM8:
+ {
+ if (_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ }
+ case DataType::QASYMM8_SIGNED:
+ {
+ if (_use_dynamic_output_quant)
+ {
+ std::uniform_int_distribution<int32_t> distribution(-128, 127);
+ library->fill(tensor, distribution, i);
+ }
+ else
+ {
+ // Legacy initialization in case the output quantization info can't be reliably estimated
+ std::pair<int, int> bounds =
+ get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int32_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ }
+ break;
+ }
+ case DataType::QSYMM8_PER_CHANNEL:
+ {
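+                // Start from an inverted [min, max] range and widen it across all channels.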
+ int min_bound = 128;
+ int max_bound = -127;
+                for (size_t c = 0; c < _weights_quantization_info.scale().size(); c++)
+                {
+                    std::pair<int, int> bounds =
+                        get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, c);
+                    if (bounds.first < min_bound)
+                    {
+                        min_bound = bounds.first;
+                    }
+                    if (bounds.second > max_bound)
+                    {
+                        max_bound = bounds.second;
+                    }
+                }
+ std::uniform_int_distribution<int32_t> distribution(min_bound, max_bound);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(_min_bias, _max_bias);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::BFLOAT16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ TensorShape _output_shape{};
+ DataType _input_data_type{};
+ DataType _weights_data_type{};
+ DataType _bias_data_type{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _weights_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
+ DataLayout _data_layout{};
+ PadStrideInfo _pad_stride_info{};
+ ActivationLayerInfo _act_info{};
+ unsigned int _depth_multiplier{};
+ Size2D _dilation{};
+ bool _mixed_layout{false};
+ bool _in_place{false};
+ bool _run_twice{false};
+ bool _use_dynamic_output_quant{false};
+ bool _skip_test{false};
+
+ int32_t _hash{0};
+ // Random initialization limits
+ // Default values are previously handcrafted limits
+    // that should be used when we don't use dynamic quantization
+ int32_t _min_bias{-100};
+ int32_t _max_bias{100};
+ int32_t _min_u8{0};
+ int32_t _max_u8{50};
+ int32_t _min_s8{-25};
+ int32_t _max_s8{25};
+};
+
+template <typename TensorType,
+ typename AccessorType,
+ typename FunctionType,
+ typename T,
+ bool mixed_layout = false,
+ bool in_place = false,
+ bool run_twice = false>
+class CpuDepthwiseConv2dValidationFixture
+ : public CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(TensorShape in_shape,
+ Size2D kernel_size,
+ PadStrideInfo pad_stride_info,
+ Size2D dilation,
+ unsigned int depth_multiplier,
+ DataType data_type,
+ DataLayout data_layout,
+ ActivationLayerInfo act_info)
+ {
+ CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(
+ in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), data_layout, act_info, mixed_layout, in_place,
+ run_twice);
+ }
+};
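+
+// The fixture above would typically be instantiated along these lines (illustrative
+// only; the exact operator and accessor types come from the test suite):
+//   using CpuDwcFp32Fixture =
+//       CpuDepthwiseConv2dValidationFixture<Tensor, Accessor, experimental::op::CpuDepthwiseConv2d, float>;
+//   FIXTURE_DATA_TEST_CASE(RunSmall, CpuDwcFp32Fixture, framework::DatasetMode::ALL, combine(...))
+//   {
+//       validate(Accessor(_target), _reference, tolerance_f32);
+//   }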
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuDepthwiseConv2dNativeValidationFixture
+ : public CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(size_t width,
+ size_t height,
+ size_t channel,
+ size_t batch,
+ Size2D kernel_size,
+ size_t depth_multiplier,
+ Size2D dilation,
+ Size2D stride,
+ bool padding_valid,
+ DataType data_type,
+ DataLayout data_layout)
+ {
+ _dilation = dilation;
+ _depth_multiplier = depth_multiplier;
+ _data_type = data_type;
+ _data_layout = data_layout;
+
+ _input_shape = TensorShape(width, height, channel, batch);
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
+ _biases_shape = TensorShape(_weights_shape.z());
+
+ if (padding_valid)
+ {
+ _conv_info = PadStrideInfo(stride.width, stride.height);
+ }
+ else
+ {
+ _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height),
+ DataLayout::NCHW, _dilation);
+ }
+ }
+
+ void configure_target()
+ {
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
+
+ if (_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);
+
+ add_padding_x({&_src, &_biases, &_target}, _data_layout);
+ add_padding_x({&_weights}, _data_layout, true);
+
+ // Create Depthwise Convolution configure function
+ const ConvolutionInfo info{_conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation};
+ _dwc.configure(_src.info(), _weights.info(), _biases.info(), _target.info(), info);
+
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
+
+ void allocate_and_run_target()
+ {
+ // Allocate tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+ _target.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(_src), 0);
+ fill(AccessorType(_weights), 1);
+ fill(AccessorType(_biases), 2);
+
+ arm_compute::ITensorPack pack;
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_0, &_src);
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &_weights);
+ pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &_biases);
+ pack.add_tensor(arm_compute::TensorType::ACL_DST, &_target);
+
+ // Compute function
+ _dwc.run(pack);
+ }
+
+ void compute_reference()
+ {
+ SimpleTensor<T> src{_input_shape, _data_type};
+ SimpleTensor<T> weights{_weights_shape, _data_type};
+ SimpleTensor<T> biases{_biases_shape, _data_type};
+
+ fill(src, 0);
+ fill(weights, 1);
+ fill(biases, 2);
+
+ const ConvolutionInfo info{_conv_info, _depth_multiplier, ActivationLayerInfo(), _dilation};
+ const TensorShape dst_shape = compute_depthwise_convolution_shape(
+ TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
+ _reference =
+ reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ PadStrideInfo _conv_info{};
+ Size2D _dilation{};
+ unsigned int _depth_multiplier{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool in_place = false>
+class CpuDepthwiseConv2dNativeConfigurableValidationFixture
+ : public CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(size_t width,
+ size_t height,
+ size_t channel,
+ size_t batch,
+ Size2D kernel_size,
+ size_t depth_multiplier,
+ Size2D dilation,
+ Size2D stride,
+ bool padding_valid,
+ DataType data_type,
+ DataLayout data_layout,
+ const ActivationLayerInfo &act_info,
+ unsigned int n0,
+ bool export_to_cl_image)
+ {
+ _dilation = dilation;
+ _depth_multiplier = depth_multiplier;
+ _data_type = data_type;
+ _data_layout = data_layout;
+ _act_info = act_info;
+ _n0 = n0;
+ _export_to_cl_image = export_to_cl_image;
+ _in_place = in_place;
+
+ _input_shape = TensorShape(width, height, channel, batch);
+ _weights_shape = TensorShape(kernel_size.width, kernel_size.height, channel * _depth_multiplier);
+ _biases_shape = TensorShape(_weights_shape.z());
+
+        if (padding_valid)
+        {
+            _conv_info = PadStrideInfo(stride.width, stride.height);
+        }
+        else
+        {
+            _conv_info = calculate_same_pad(_input_shape, _weights_shape, PadStrideInfo(stride.width, stride.height),
+                                            DataLayout::NCHW, _dilation);
+        }
+ }
+
+ void configure_target()
+ {
+#if defined(ARM_COMPUTE_OPENCL_ENABLED)
+ if (_export_to_cl_image)
+ {
+ _validate_output &= image2d_from_buffer_supported(CLKernelLibrary::get().get_device());
+ _validate_output &= (get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) != 0);
+ }
+#endif // ARM_COMPUTE_OPENCL_ENABLED
+
+ if (!_validate_output)
+ {
+ return;
+ }
+
+ TensorShape input_shape = _input_shape;
+ TensorShape weights_shape = _weights_shape;
+
+ if (_data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ _src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ _biases = create_tensor<TensorType>(_biases_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType *target_to_use = nullptr;
+ if (!_in_place)
+ {
+ _target = create_tensor<TensorType>(TensorShape(), _data_type, 1, QuantizationInfo(), _data_layout);
+ target_to_use = &_target;
+ }
+
+ DWCComputeKernelInfo dwc_info;
+ dwc_info.n0 = _n0;
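+        // Process 8 rows per iteration only when the horizontal stride and dilation are 1.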
+        dwc_info.m0 = (_conv_info.stride().first == 1 && _dilation.x() == 1) ? 8 : 1;
+ dwc_info.export_input_to_cl_image = false;
+ dwc_info.export_weights_to_cl_image = _export_to_cl_image;
+
+ const ConvolutionInfo conv_kernel_info{_conv_info, _depth_multiplier, _act_info, _dilation};
+
+ add_padding_x({&_src, &_biases, &_target}, _data_layout);
+        add_padding_x({&_weights}, _data_layout,
+                      _export_to_cl_image); // Don't add left padding if the CL image will be used
+
+ // Create Depthwise Convolution configure function
+ _dwc.configure(&_src, &_weights, &_biases, target_to_use, dwc_info, conv_kernel_info);
+
+ ARM_COMPUTE_ASSERT(_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_biases.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(_target.info()->is_resizable());
+ }
+
+ void allocate_and_run_target()
+ {
+ if (!_validate_output)
+ {
+ return;
+ }
+
+ // Allocate tensors
+ _src.allocator()->allocate();
+ _weights.allocator()->allocate();
+ _biases.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!_src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!_biases.info()->is_resizable());
+ if (!_in_place)
+ {
+ _target.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!_target.info()->is_resizable());
+ }
+
+ // Fill tensors
+ fill(AccessorType(_src), 0);
+ fill(AccessorType(_weights), 1);
+ fill(AccessorType(_biases), 2);
+
+        // Test multi data layout graph cases where the data layout changes after configure
+ _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ if (!_in_place)
+ {
+ _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ }
+
+ // Compute function
+ _dwc.run();
+
+        // Reinstate the original data layout so the test suite can properly check the values
+ if (!_in_place)
+ {
+ _target.info()->set_data_layout(_data_layout);
+ }
+ }
+
+ void compute_reference()
+ {
+ if (!_validate_output)
+ {
+ return;
+ }
+
+ SimpleTensor<T> src{_input_shape, _data_type};
+ SimpleTensor<T> weights{_weights_shape, _data_type};
+ SimpleTensor<T> biases{_biases_shape, _data_type};
+
+ fill(src, 0);
+ fill(weights, 1);
+ fill(biases, 2);
+
+ const ConvolutionInfo info{_conv_info, _depth_multiplier, _act_info, _dilation};
+ const TensorShape dst_shape = compute_depthwise_convolution_shape(
+ TensorInfo(_input_shape, 1, _data_type), TensorInfo(_weights_shape, 1, _data_type), info);
+ _reference = reference::activation_layer(
+ reference::depthwise_convolution(src, weights, biases, dst_shape, _conv_info, _depth_multiplier, _dilation),
+ _act_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{-1.0f, 1.0f};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+
+ TensorType _src{};
+ TensorType _weights{};
+ TensorType _biases{};
+ FunctionType _dwc{};
+
+ TensorShape _input_shape{};
+ TensorShape _weights_shape{};
+ TensorShape _biases_shape{};
+ DataType _data_type{};
+ DataLayout _data_layout{};
+ PadStrideInfo _conv_info{};
+ ActivationLayerInfo _act_info{};
+ Size2D _dilation{};
+ unsigned int _depth_multiplier{};
+ unsigned int _n0{};
+ bool _export_to_cl_image{};
+ bool _validate_output{true};
+ bool _in_place{false};
+};
+
+template <typename TensorType,
+ typename AccessorType,
+ typename FunctionType,
+ typename T,
+ bool mixed_layout = false,
+ bool in_place = false>
+class CpuDepthwiseConv2dValidationQuantizedFixture
+ : public CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
+{
+public:
+ void setup(TensorShape in_shape,
+ Size2D kernel_size,
+ PadStrideInfo pad_stride_info,
+ Size2D dilation,
+ unsigned int depth_multiplier,
+ DataType data_type,
+ QuantizationInfo input_quantization_info,
+ QuantizationInfo output_quantization_info,
+ DataLayout data_layout,
+ ActivationLayerInfo act_info)
+ {
+ CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(
+ in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type,
+ input_quantization_info, input_quantization_info, output_quantization_info, data_layout, act_info,
+ mixed_layout, in_place);
+ }
+};
+
+template <typename TensorType,
+ typename AccessorType,
+ typename FunctionType,
+ typename T,
+ typename TW,
+ bool in_place = false>
+class CpuDepthwiseConv2dValidationQuantizedPerChannelFixture
+ : public CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
+{
+public:
+ void setup(TensorShape in_shape,
+ Size2D kernel_size,
+ PadStrideInfo pad_stride_info,
+ Size2D dilation,
+ unsigned int depth_multiplier,
+ DataType input_data_type,
+ DataType weights_data_type,
+ QuantizationInfo input_quantization_info,
+ QuantizationInfo output_quantization_info,
+ DataLayout data_layout,
+ ActivationLayerInfo act_info)
+ {
+ const float out_scale = output_quantization_info.uniform().scale;
+ const float in_scale = input_quantization_info.uniform().scale;
+
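+        // Draw one random scale per output channel (in_shape.z() * depth_multiplier), capped
+        // at out_scale / in_scale so the requantization multiplier stays below 1.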
+ std::vector<float> weights_scales{};
+ std::mt19937 gen(library->seed());
+ std::uniform_real_distribution<float> dis(0.01f, out_scale / in_scale);
+ for (size_t i = 0; i < in_shape.z() * depth_multiplier; ++i)
+ {
+ weights_scales.push_back(dis(gen));
+ }
+
+ CpuDepthwiseConv2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(
+ in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, input_data_type, weights_data_type,
+ input_quantization_info, QuantizationInfo(weights_scales), output_quantization_info, data_layout, act_info,
+ false, in_place);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUDEPTHWISECONV2DFIXTURE_H
diff --git a/tests/validation/fixtures/CpuElementwiseFixture.h b/tests/validation/fixtures/CpuElementwiseFixture.h
new file mode 100644
index 0000000000..29757325ff
--- /dev/null
+++ b/tests/validation/fixtures/CpuElementwiseFixture.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUELEMENTWISEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUELEMENTWISEFIXTURE_H
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuElementwiseOperationsGenericFixture : public framework::Fixture
+{
+public:
+ void setup(ArithmeticOperation op,
+ const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type0,
+ DataType data_type1,
+ DataType output_data_type,
+ bool is_inplace = false)
+ {
+ if (std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type0 == DataType::F16 || data_type1 == DataType::F16 || output_data_type == DataType::F16) &&
+ !CPUInfo::get().has_fp16())
+ {
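+            // Leave _target and _reference empty so the later validation is a no-op on
+            // CPUs without FP16 support.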
+ return;
+ }
+
+ _op = op;
+ _is_inplace = is_inplace;
+
+ _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type);
+ _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ if (is_data_type_float(tensor.data_type()))
+ {
+ switch (_op)
+ {
+ case ArithmeticOperation::DIV:
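+                    // fill_tensor_uniform_ranged treats the pairs as excluded ranges: keep
+                    // the operands away from zero so the division remains numerically stable.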
+ library->fill_tensor_uniform_ranged(tensor, i, {std::pair<float, float>(-0.001f, 0.001f)});
+ break;
+ case ArithmeticOperation::POWER:
+ library->fill_tensor_uniform(tensor, i, 0.0f, 5.0f);
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+ else
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type0,
+ DataType data_type1,
+ DataType output_data_type)
+ {
+ // Create tensors
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, QuantizationInfo());
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, QuantizationInfo());
+ TensorType dst = create_tensor<TensorType>(out_shape, output_data_type, 1, QuantizationInfo());
+
+        // Check whether to do the computation in place and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if (_is_inplace)
+ {
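+            // In-place execution requires the chosen input to already match the broadcast
+            // output shape: e.g. for shape0 = {27, 13, 2} and shape1 = {27, 1, 2} only
+            // src1 can hold the result.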
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) &&
+ (data_type0 == output_data_type);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) &&
+ (data_type1 == output_data_type);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if (src1_is_inplace)
+ {
+ actual_dst = &ref_src1;
+ }
+ else
+ {
+ actual_dst = &ref_src2;
+ }
+ }
+
+ // Create and configure function
+ FunctionType elem_op;
+ elem_op.configure(ref_src1.info(), ref_src2.info(), actual_dst->info());
+
+ ARM_COMPUTE_ASSERT(ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(ref_src2.info()->is_resizable());
+
+ // Allocate tensors
+ ref_src1.allocator()->allocate();
+ ref_src2.allocator()->allocate();
+
+        // When not computing in place, the original dst still needs to be allocated
+ if (!_is_inplace)
+ {
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ dst.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ }
+
+ ARM_COMPUTE_ASSERT(!ref_src1.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!ref_src2.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(ref_src1), 0);
+ fill(AccessorType(ref_src2), 1);
+
+ // Compute function
+        ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &ref_src1},
+                             {arm_compute::TensorType::ACL_SRC_1, &ref_src2},
+                             {arm_compute::TensorType::ACL_DST, actual_dst}};
+
+ elem_op.run(run_pack);
+
+ return std::move(*actual_dst);
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType data_type0,
+ DataType data_type1,
+ DataType output_data_type)
+ {
+ // Create reference
+ SimpleTensor<T> ref_src1{shape0, data_type0, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_src2{shape1, data_type1, 1, QuantizationInfo()};
+ SimpleTensor<T> ref_dst{TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, QuantizationInfo()};
+
+ // Fill reference
+ fill(ref_src1, 0);
+ fill(ref_src2, 1);
+
+ return reference::arithmetic_operation<T>(_op, ref_src1, ref_src2, ref_dst);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ ArithmeticOperation _op{ArithmeticOperation::ADD};
+ bool _is_inplace{false};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuElementwiseDivisionValidationFixture
+ : public CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(
+ const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
+ {
+ CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ArithmeticOperation::DIV, shape, shape, data_type0, data_type1, output_data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuElementwiseMaxValidationFixture
+ : public CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(
+ const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
+ {
+ CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ArithmeticOperation::MAX, shape, shape, data_type0, data_type1, output_data_type, is_inplace);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuElementwiseMinValidationFixture
+ : public CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(
+ const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, bool is_inplace)
+ {
+ CpuElementwiseOperationsGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ ArithmeticOperation::MIN, shape, shape, data_type0, data_type1, output_data_type, is_inplace);
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUELEMENTWISEFIXTURE_H
diff --git a/tests/validation/fixtures/CpuGemmAssemblyDispatchFixture.h b/tests/validation/fixtures/CpuGemmAssemblyDispatchFixture.h
new file mode 100644
index 0000000000..fc070eb7a0
--- /dev/null
+++ b/tests/validation/fixtures/CpuGemmAssemblyDispatchFixture.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2017-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMASSEMBLYDISPATCHFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMASSEMBLYDISPATCHFIXTURE_H
+
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/GEMM.h"
+#include "arm_compute/core/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class CpuGemmAssemblyDispatchGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape shape_c,
+ TensorShape output_shape,
+ float alpha,
+ float beta,
+ DataType data_type,
+ bool accumulate)
+ {
+ ARM_COMPUTE_UNUSED(alpha);
+ ARM_COMPUTE_UNUSED(beta);
+ _target = compute_target(shape_a, shape_b, shape_c, output_shape, data_type, accumulate);
+ _reference = compute_reference(shape_a, shape_b, output_shape, data_type, accumulate);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
+ {
+ switch (tensor.data_type())
+ {
+ case DataType::F16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{float(lo), float(hi)};
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ case DataType::F32:
+ {
+ std::uniform_real_distribution<float> distribution(lo, hi);
+ library->fill(tensor, distribution, i);
+ break;
+ }
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ }
+ }
+
+ TensorType compute_target(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &shape_c,
+ const TensorShape &output_shape,
+ DataType data_type,
+ bool accumulate)
+ {
+ ARM_COMPUTE_UNUSED(shape_c);
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
+ TensorType *c = nullptr;
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+
+ // Create and configure function
+ FunctionType gemm;
+
+ add_padding_x({&a, &b, &dst});
+
+ GEMMInfo gemm_info;
+ gemm_info.set_accumulate(accumulate);
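+        // With accumulate enabled the dispatch computes dst = A * B + dst, so dst is
+        // pre-filled below and the reference uses gemm_accumulate.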
+
+ ARM_COMPUTE_ASSERT(gemm.validate(a.info(), b.info(), nullptr, dst.info(), gemm_info));
+
+        // The GEMMInfo includes the depth value in case of a reinterpreted 3D output.
+        // If the output shape has the same number of dimensions as the input, the method called is a 2D matrix
+        // multiplication (depth_output_reinterpreted_as_3D = 0); otherwise the reinterpreted version of GEMM is
+        // used (depth_output_reinterpreted_as_3D = depth of the 3D output).
+ gemm.configure(a.info(), b.info(), nullptr, dst.info(), gemm_info);
+
+ ARM_COMPUTE_ASSERT(gemm.is_configured());
+
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(a), 0, -1.f, 1.f);
+ fill(AccessorType(b), 1, -1.f, 1.f);
+        if (accumulate)
+        {
+            fill(AccessorType(dst), 6);
+        }
+
+ ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &a},
+ {arm_compute::TensorType::ACL_SRC_1, &b},
+ {arm_compute::TensorType::ACL_SRC_2, c},
+ {arm_compute::TensorType::ACL_DST_0, &dst}};
+
+ // Prepare memory
+ ITensorPack prep_pack{{arm_compute::TensorType::ACL_SRC_1, &b}, {arm_compute::TensorType::ACL_SRC_2, c}};
+
+ experimental::MemoryRequirements aux_mem_req = gemm.workspace();
+ MemoryGroup memory_group{};
+
+ WorkspaceData<Tensor> workspace = manage_workspace<Tensor>(aux_mem_req, memory_group, run_pack, prep_pack);
+
+ gemm.prepare(prep_pack);
+ MemoryGroupResourceScope scope_mg(memory_group);
+
+ auto has_reshape = std::find_if(aux_mem_req.begin(), aux_mem_req.end(),
+ [](const arm_compute::experimental::MemoryInfo &m) -> bool {
+ return m.lifetime == arm_compute::experimental::MemoryLifetime::Persistent;
+ });
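+        // If the assembly kernel keeps a persistent workspace copy of B (e.g. a reshaped
+        // buffer), the original B is only needed during prepare(); otherwise it must stay
+        // available in the run pack.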
+
+ if (has_reshape != std::end(aux_mem_req))
+ {
+ b.mark_as_unused();
+ }
+ else
+ {
+ run_pack.add_const_tensor(ACL_SRC_1, &b);
+ }
+
+ // Release temporary tensors that are only used in prepare stage
+ release_temporaries<Tensor>(aux_mem_req, workspace);
+ // End of preparing
+
+ // Compute GEMM function
+ gemm.run(run_pack);
+
+ a.allocator()->free();
+ b.allocator()->free();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ DataType data_type,
+ bool accumulate)
+ {
+ // Create reference
+ SimpleTensor<T> a{shape_a, data_type, 1};
+ SimpleTensor<T> b{shape_b, data_type, 1};
+ SimpleTensor<T> c{output_shape, data_type, 1};
+ SimpleTensor<T> dst{output_shape, data_type, 1};
+
+ // Fill reference
+ fill(a, 0, -1.f, 1.f);
+ fill(b, 1, -1.f, 1.f);
+ fill(c, 2);
+
+        // Pre-fill dst with the values to accumulate onto
+ if (accumulate)
+ {
+ fill(dst, 6);
+ }
+
+        // Setting beta to 0 effectively disables C in the reference
+        // computation: A * B + 0 * C
+ if (accumulate)
+ {
+ reference::gemm_accumulate<T>(a, b, c, 1.0f, 0.f, dst);
+ return dst;
+ }
+ else
+ {
+ return reference::gemm<T>(a, b, c, 1.f, 0.f);
+ }
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool accumulate>
+class CpuGemmAssemblyDispatchValidationFixture
+ : protected CpuGemmAssemblyDispatchGenericValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape shape_c,
+ TensorShape output_shape,
+ float alpha,
+ float beta,
+ DataType data_type)
+ {
+ CpuGemmAssemblyDispatchGenericValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(
+ shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, accumulate);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMASSEMBLYDISPATCHFIXTURE_H
diff --git a/tests/validation/fixtures/CpuGemmConv2dFixture.h b/tests/validation/fixtures/CpuGemmConv2dFixture.h
new file mode 100644
index 0000000000..c8e82fb8a0
--- /dev/null
+++ b/tests/validation/fixtures/CpuGemmConv2dFixture.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMCONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMCONV2DFIXTURE_H
+
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/QuantizationInfo.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+#include "arm_compute/graph/Utils.h"
+
+#include "src/core/helpers/MemoryHelpers.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/Utils.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuGemmConv2dValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ PadStrideInfo info,
+ Size2D dilation)
+ {
+ _dilation = dilation;
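+        // Mixed into the fill seeds below so that different test configurations get
+        // different random data.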
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] + weights_shape[0] +
+ weights_shape[1] + weights_shape[2] + weights_shape[3];
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ const TensorShape &bias_shape,
+ TensorShape output_shape,
+ const PadStrideInfo &info)
+ {
+        // The dataset shapes are NCHW; permute them to NHWC, the layout in which the target runs.
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ const auto src_info = TensorInfo(input_shape, 1, DataType::F32, _data_layout);
+ const auto weights_info = TensorInfo(weights_shape, 1, DataType::F32, _data_layout);
+ const auto biases_info = TensorInfo(bias_shape, 1, DataType::F32, _data_layout);
+ auto dst_info = TensorInfo(output_shape, 1, DataType::F32, _data_layout);
+
+ auto conv = std::make_unique<FunctionType>();
+ conv->configure(&src_info, &weights_info, &biases_info, &dst_info, info);
+ ARM_COMPUTE_ASSERT(conv->validate(&src_info, &weights_info, &biases_info, &dst_info, info));
+
+ // Create tensors
+ auto src = create_tensor<Tensor>(src_info);
+ auto weights = create_tensor<Tensor>(weights_info);
+ auto biases = create_tensor<Tensor>(biases_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ biases.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &src},
+ {arm_compute::TensorType::ACL_SRC_1, &weights},
+ {arm_compute::TensorType::ACL_SRC_2, &biases},
+ {arm_compute::TensorType::ACL_DST, &dst}};
+ ITensorPack prep_pack{{arm_compute::TensorType::ACL_SRC_1, &weights},
+ {arm_compute::TensorType::ACL_SRC_2, &biases}};
+
+ auto const aux_mem_req = conv->workspace();
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(aux_mem_req, mg, run_pack, prep_pack);
+
+ // Fill tensors
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(biases), 2 + _hash);
+
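+        // prepare() pre-processes the constant weights and biases from prep_pack once;
+        // run() then executes the convolution with the tensors in run_pack.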
+ conv->prepare(prep_pack);
+ conv->run(run_pack);
+
+ src.allocator()->free();
+ weights.allocator()->free();
+ biases.allocator()->free();
+
+ return dst;
+ }
+
+ SimpleTensor<float> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const TensorShape &output_shape,
+ const PadStrideInfo &info)
+ {
+ // Create reference
+ SimpleTensor<float> src{input_shape, DataType::F32};
+ SimpleTensor<float> weights{weights_shape, DataType::F32};
+ SimpleTensor<float> bias{bias_shape, DataType::F32};
+
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
+
+ return reference::convolution_layer<float>(src, weights, bias, output_shape, info, _dilation);
+ }
+
+ TensorType _target{};
+ SimpleTensor<float> _reference{};
+ Size2D _dilation{};
+ int32_t _hash{0};
+ DataLayout _data_layout{DataLayout::NHWC};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMCONV2DFIXTURE_H
diff --git a/tests/validation/fixtures/CpuGemmDirectConv2dFixture.h b/tests/validation/fixtures/CpuGemmDirectConv2dFixture.h
new file mode 100644
index 0000000000..2e4000117f
--- /dev/null
+++ b/tests/validation/fixtures/CpuGemmDirectConv2dFixture.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMDIRECTCONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMDIRECTCONV2DFIXTURE_H
+
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/QuantizationInfo.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+#include "arm_compute/graph/Utils.h"
+
+#include "src/core/helpers/MemoryHelpers.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/Utils.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuGemmDirectConv2dValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ PadStrideInfo info,
+ Size2D dilation)
+ {
+ _dilation = dilation;
+ _hash = input_shape[0] + input_shape[1] + input_shape[2] + input_shape[3] + weights_shape[0] +
+ weights_shape[1] + weights_shape[2] + weights_shape[3];
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ const TensorShape &bias_shape,
+ TensorShape output_shape,
+ const PadStrideInfo &info)
+ {
+        // The dataset shapes are NCHW; permute them to NHWC, the layout in which the target runs.
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ const auto src_info = TensorInfo(input_shape, 1, DataType::F32, _data_layout);
+ const auto weights_info = TensorInfo(weights_shape, 1, DataType::F32, _data_layout);
+ const auto biases_info = TensorInfo(bias_shape, 1, DataType::F32, _data_layout);
+ auto dst_info = TensorInfo(output_shape, 1, DataType::F32, _data_layout);
+ const auto conv_info = Conv2dInfo{info, _dilation, ActivationLayerInfo(), false, 1};
+
+ auto conv = std::make_unique<FunctionType>();
+ conv->configure(&src_info, &weights_info, &biases_info, &dst_info, conv_info);
+ ARM_COMPUTE_ASSERT(conv->validate(&src_info, &weights_info, &biases_info, &dst_info, conv_info));
+
+ // Create tensors
+ auto src = create_tensor<Tensor>(src_info);
+ auto weights = create_tensor<Tensor>(weights_info);
+ auto biases = create_tensor<Tensor>(biases_info);
+ auto dst = create_tensor<Tensor>(dst_info);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ biases.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &src},
+ {arm_compute::TensorType::ACL_SRC_1, &weights},
+ {arm_compute::TensorType::ACL_SRC_2, &biases},
+ {arm_compute::TensorType::ACL_DST, &dst}};
+ ITensorPack prep_pack{{arm_compute::TensorType::ACL_SRC_1, &weights},
+ {arm_compute::TensorType::ACL_SRC_2, &biases}};
+
+ auto const aux_mem_req = conv->workspace();
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(aux_mem_req, mg, run_pack, prep_pack);
+
+ // Fill tensors
+ fill(AccessorType(src), 0 + _hash);
+ fill(AccessorType(weights), 1 + _hash);
+ fill(AccessorType(biases), 2 + _hash);
+
+ conv->prepare(prep_pack);
+ conv->run(run_pack);
+
+ src.allocator()->free();
+ weights.allocator()->free();
+ biases.allocator()->free();
+
+ return dst;
+ }
+
+ SimpleTensor<float> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const TensorShape &output_shape,
+ const PadStrideInfo &info)
+ {
+ // Create reference
+ SimpleTensor<float> src{input_shape, DataType::F32};
+ SimpleTensor<float> weights{weights_shape, DataType::F32};
+ SimpleTensor<float> bias{bias_shape, DataType::F32};
+
+ fill(src, 0 + _hash);
+ fill(weights, 1 + _hash);
+ fill(bias, 2 + _hash);
+
+ return reference::convolution_layer<float>(src, weights, bias, output_shape, info, _dilation);
+ }
+
+ TensorType _target{};
+ int32_t _hash{0};
+ SimpleTensor<float> _reference{};
+ Size2D _dilation{};
+ DataLayout _data_layout{DataLayout::NHWC};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUGEMMDIRECTCONV2DFIXTURE_H
diff --git a/tests/validation/fixtures/CpuMulFixture.h b/tests/validation/fixtures/CpuMulFixture.h
new file mode 100644
index 0000000000..ec16c9fa1f
--- /dev/null
+++ b/tests/validation/fixtures/CpuMulFixture.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUMULFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUMULFIXTURE_H
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+#include "tests/AssetsLibrary.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/PixelWiseMultiplication.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
+class CpuMulGenericValidationFixture : public framework::Fixture
+{
+public:
+ void setup(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ bool is_inplace)
+ {
+ if (std::is_same<TensorType, Tensor>::value && // Cpu
+ (dt_in1 == DataType::F16 || dt_in2 == DataType::F16 || dt_out == DataType::F16) &&
+ !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
+ _is_inplace = is_inplace;
+ _target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ _reference =
+ compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy,
+ QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), ActivationLayerInfo());
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, unsigned int seed_offset)
+ {
+ library->fill_tensor_uniform(tensor, seed_offset);
+ }
+
+ TensorType compute_target(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out,
+ ActivationLayerInfo act_info)
+ {
+ // Create tensors
+ const TensorShape out_shape = TensorShape::broadcast_shape(shape0, shape1);
+ TensorType src1 = create_tensor<TensorType>(shape0, dt_in1, 1, qinfo0);
+ TensorType src2 = create_tensor<TensorType>(shape1, dt_in2, 1, qinfo1);
+ TensorType dst = create_tensor<TensorType>(out_shape, dt_out, 1, qinfo_out);
+
+        // Check whether to do the computation in place and whether the inputs are broadcast compatible
+ TensorType *actual_dst = &dst;
+ if (_is_inplace)
+ {
+ bool src1_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape0, 0) &&
+ (qinfo0 == qinfo_out) && (dt_in1 == dt_out);
+ bool src2_is_inplace = !arm_compute::detail::have_different_dimensions(out_shape, shape1, 0) &&
+ (qinfo1 == qinfo_out) && (dt_in2 == dt_out);
+ bool do_in_place = out_shape.total_size() != 0 && (src1_is_inplace || src2_is_inplace);
+ ARM_COMPUTE_ASSERT(do_in_place);
+
+ if (src1_is_inplace)
+ {
+ actual_dst = &src1;
+ }
+ else
+ {
+ actual_dst = &src2;
+ }
+ }
+
+ auto allocate_tensor = [](TensorType &t)
+ {
+ ARM_COMPUTE_ASSERT(t.info()->is_resizable());
+ t.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!t.info()->is_resizable());
+ };
+
+ // Create and configure function
+ FunctionType multiply;
+ multiply.configure(src1.info(), src2.info(), actual_dst->info(), scale, convert_policy, rounding_policy,
+ act_info);
+
+ allocate_tensor(src1);
+ allocate_tensor(src2);
+
+        // When not computing in place, the original dst still needs to be allocated
+ if (!_is_inplace)
+ {
+ allocate_tensor(dst);
+ }
+
+ // Fill tensors
+ fill(AccessorType(src1), 0);
+ fill(AccessorType(src2), 1);
+
+ // Compute function
+ ITensorPack run_pack{{arm_compute::TensorType::ACL_SRC_0, &src1},
+ {arm_compute::TensorType::ACL_SRC_1, &src2},
+ {arm_compute::TensorType::ACL_DST, actual_dst}};
+ multiply.run(run_pack);
+
+ return std::move(*actual_dst);
+ }
+
+ SimpleTensor<T3> compute_reference(const TensorShape &shape0,
+ const TensorShape &shape1,
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ QuantizationInfo qinfo0,
+ QuantizationInfo qinfo1,
+ QuantizationInfo qinfo_out,
+ ActivationLayerInfo act_info)
+ {
+ // Create reference
+ SimpleTensor<T1> src1{shape0, dt_in1, 1, qinfo0};
+ SimpleTensor<T2> src2{shape1, dt_in2, 1, qinfo1};
+
+ // Fill reference
+ fill(src1, 0);
+ fill(src2, 1);
+
+ auto result = reference::pixel_wise_multiplication<T1, T2, T3>(src1, src2, scale, convert_policy,
+ rounding_policy, dt_out, qinfo_out);
+ return act_info.enabled() ? reference::activation_layer(result, act_info, qinfo_out) : result;
+ }
+
+ TensorType _target{};
+ SimpleTensor<T3> _reference{};
+ bool _is_inplace{false};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2, typename T3 = T2>
+class CpuMulValidationFixture
+ : public CpuMulGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>
+{
+public:
+ void setup(const TensorShape &shape,
+ DataType dt_in1,
+ DataType dt_in2,
+ DataType dt_out,
+ float scale,
+ ConvertPolicy convert_policy,
+ RoundingPolicy rounding_policy,
+ bool is_inplace)
+ {
+ CpuMulGenericValidationFixture<TensorType, AccessorType, FunctionType, T1, T2, T3>::setup(
+ shape, shape, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, is_inplace);
+ }
+};
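+
+// Minimal usage sketch (illustrative only; the concrete operator type and datasets
+// depend on the test suite that includes this header):
+//   using CpuMulU8Fixture = CpuMulValidationFixture<Tensor, Accessor, experimental::op::CpuMul, uint8_t, uint8_t>;
+//   FIXTURE_DATA_TEST_CASE(SmokeTest, CpuMulU8Fixture, framework::DatasetMode::ALL, /* ...dataset... */);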
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUMULFIXTURE_H
diff --git a/tests/validation/fixtures/CpuWinogradConv2dFixture.h b/tests/validation/fixtures/CpuWinogradConv2dFixture.h
new file mode 100644
index 0000000000..d390aded28
--- /dev/null
+++ b/tests/validation/fixtures/CpuWinogradConv2dFixture.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CPUWINOGRADCONV2DFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CPUWINOGRADCONV2DFIXTURE_H
+
+#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class CpuWinogradConv2dValidationFixture : public framework::Fixture
+{
+public:
+ void setup(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ PadStrideInfo info,
+ Size2D dilation,
+ ActivationLayerInfo act_info)
+ {
+ ARM_COMPUTE_UNUSED(dilation);
+ _act_info = act_info;
+
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, float min, float max)
+ {
+ std::uniform_real_distribution<float> distribution(min, max);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(TensorShape input_shape,
+ TensorShape weights_shape,
+ TensorShape bias_shape,
+ TensorShape output_shape,
+ const PadStrideInfo &info)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(weights_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);
+
+ // Create and configure function
+ auto conv = std::make_unique<FunctionType>();
+ ARM_COMPUTE_EXPECT(static_cast<bool>(conv->validate(src.info(), weights.info(), bias.info(), dst.info(), info,
+ _act_info, true)),
+ framework::LogLevel::ERRORS);
+ conv->configure(src.info(), weights.info(), bias.info(), dst.info(), info, _act_info, true);
+
+ ARM_COMPUTE_ASSERT(src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+ add_padding_x({&src, &weights, &bias, &dst}, _data_layout);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ weights.allocator()->allocate();
+ dst.allocator()->allocate();
+ bias.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+ // Fill tensors
+ fill(AccessorType(src), 0, -0.5f, 0.5f);
+ fill(AccessorType(weights), 1, -0.5f, 0.5f);
+ fill(AccessorType(bias), 2, -0.5f, 0.5f);
+
+ // Compute function
+ ITensorPack run_pack = {{ACL_SRC_0, &src}, {ACL_SRC_1, &weights}, {ACL_SRC_2, &bias}, {ACL_DST, &dst}};
+ ITensorPack prep_pack = {{ACL_SRC_1, &weights}, {ACL_SRC_2, &bias}};
+
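+        // Query the operator's auxiliary memory requirements, then allocate the workspace and register it in the packs.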
+ auto const aux_mem_req = conv->workspace();
+ auto mg = MemoryGroup{};
+ auto ws = manage_workspace<Tensor>(aux_mem_req, mg, run_pack, prep_pack);
+
+ conv->prepare(prep_pack);
+ conv->run(run_pack);
+
+ src.allocator()->free();
+ weights.allocator()->free();
+ bias.allocator()->free();
+
+ return dst;
+ }
+
+ SimpleTensor<float> compute_reference(const TensorShape &input_shape,
+ const TensorShape &weights_shape,
+ const TensorShape &bias_shape,
+ const PadStrideInfo &info)
+ {
+ // Create reference
+ SimpleTensor<float> src_t{input_shape, _data_type, 1};
+ SimpleTensor<float> weights_t{weights_shape, _data_type, 1};
+ SimpleTensor<float> bias_t{bias_shape, _data_type, 1};
+
+ // Fill reference
+ fill(src_t, 0, -0.5f, 0.5f);
+ SimpleTensor<float> src_t1(copy_tensor<float, float>(src_t));
+
+ fill(weights_t, 1, -0.5f, 0.5f);
+ SimpleTensor<float> weights_t1(copy_tensor<float, float>(weights_t));
+ fill(bias_t, 2, -0.5f, 0.5f);
+ SimpleTensor<float> bias_t1(copy_tensor<float, float>(bias_t));
+
+ // Set output tile
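+        // A 4x4 output tile is used by default; 1-D kernels fall back to 1-D tiles matching their orientation.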
+ Size2D output_tile(4U, 4U);
+ if (weights_shape[0] == 7 && weights_shape[1] == 1)
+ {
+ output_tile.width = 2;
+ output_tile.height = 1;
+ }
+ else if (weights_shape[0] == 1 && weights_shape[1] == 7)
+ {
+ output_tile.width = 1;
+ output_tile.height = 2;
+ }
+ else if (weights_shape[0] == 1)
+ {
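+            // Back the managed memory with the allocator matching the backend (Neon or OpenCL).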
+ output_tile.width = 1;
+ }
+ else if (weights_shape[1] == 1)
+ {
+ output_tile.height = 1;
+ }
+
+ WinogradInfo winograd_info(output_tile, Size2D(weights_shape[0], weights_shape[1]),
+ Size2D(input_shape[0], input_shape[1]), info, src_t1.data_layout());
+
+ // Compute tensor shapes for input, filter and output transforms
+ TensorShape input_transform_shape =
+ compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, _data_type), winograd_info);
+ TensorShape filter_transform_shape =
+ compute_winograd_filter_transform_shape(TensorInfo(weights_shape, 1, _data_type), winograd_info);
+ TensorShape batched_gemm_shape = input_transform_shape;
+ batched_gemm_shape[0] = filter_transform_shape[0];
+ TensorShape output_transform_shape =
+ compute_winograd_output_transform_shape(TensorInfo(batched_gemm_shape, 1, _data_type), winograd_info);
+
+ // Dummy matrix C to perform matrix multiplication
+ SimpleTensor<float> dummy_c{batched_gemm_shape, _data_type, 1};
+
+ // Compute Winograd-based convolution
+ SimpleTensor<float> input_transform_out =
+ reference::winograd_input_transform<float>(src_t1, input_transform_shape, winograd_info);
+
+ SimpleTensor<float> filter_transform_out =
+ reference::winograd_filter_transform<float>(weights_t1, filter_transform_shape, winograd_info);
+ SimpleTensor<float> batched_gemm =
+ reference::gemm<float>(input_transform_out, filter_transform_out, dummy_c, 1.0f, 0.0f);
+ SimpleTensor<float> conv_out =
+ reference::winograd_output_transform<float>(batched_gemm, bias_t1, output_transform_shape, winograd_info);
+ SimpleTensor<float> conv_out_t(copy_tensor<float, float>(conv_out));
+ return (_act_info.enabled()) ? reference::activation_layer<float>(conv_out_t, _act_info) : conv_out_t;
+ }
+
+ TensorType _target{};
+ SimpleTensor<float> _reference{};
+ ActivationLayerInfo _act_info{};
+ DataType _data_type{DataType::F32};
+ DataLayout _data_layout{DataLayout::NHWC};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CPUWINOGRADCONV2DFIXTURE_H
diff --git a/tests/validation/fixtures/CropResizeFixture.h b/tests/validation/fixtures/CropResizeFixture.h
index 30a3fd8569..51db5e0947 100644
--- a/tests/validation/fixtures/CropResizeFixture.h
+++ b/tests/validation/fixtures/CropResizeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
-#define ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_CROPRESIZEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_CROPRESIZEFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ public:
void setup(TensorShape src_shape, TensorShape boxes_shape, Coordinates2D crop_size, InterpolationPolicy method,
float extrapolation_value, bool is_outside_bounds, DataType data_type)
{
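+        // Skip the test if FP16 is requested but the CPU does not support FP16 instructions.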
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(src_shape, boxes_shape, crop_size, method, extrapolation_value, is_outside_bounds, data_type);
_reference = compute_reference(src_shape, boxes_shape, crop_size, method, extrapolation_value, is_outside_bounds, data_type);
}
@@ -131,4 +137,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_CROPRESIZEFIXTURE_H
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 83170c413c..30443cc742 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,6 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DECONVOLUTIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DECONVOLUTIONLAYERFIXTURE_H
+
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
@@ -53,6 +57,12 @@ public:
DataType data_type, DataType weights_data_type, DataLayout data_layout,
QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, QuantizationInfo weights_quantization_info, bool add_bias)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_data_type = data_type;
_weights_data_type = weights_data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
@@ -248,6 +258,12 @@ public:
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -267,6 +283,12 @@ public:
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top,
unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL);
@@ -286,6 +308,12 @@ public:
void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -307,6 +335,12 @@ public:
unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias,
DataType weights_data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const TensorShape weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
@@ -331,3 +365,5 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
+
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DECONVOLUTIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/DepthConvertLayerFixture.h b/tests/validation/fixtures/DepthConvertLayerFixture.h
index f55d20bf3e..7a60ca8bf5 100644
--- a/tests/validation/fixtures/DepthConvertLayerFixture.h
+++ b/tests/validation/fixtures/DepthConvertLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE
-#define ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DEPTHCONVERTLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DEPTHCONVERTLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class DepthConvertLayerValidationBaseFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (dt_in == DataType::F16 || dt_out == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_shift = shift;
_quantization_info = quantization_info;
_target = compute_target(shape, dt_in, dt_out, policy, shift);
@@ -149,4 +155,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DEPTHCONVERTLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 6e2e3a3846..055e74de89 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -91,6 +91,15 @@ public:
DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false, bool in_place = false, bool run_twice = false)
{
ARM_COMPUTE_ERROR_ON(mixed_layout && in_place);
+
+ _skip_test = false;
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (input_data_type == DataType::F16 || weights_data_type == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ _skip_test = true;
+ return;
+ }
+
// This hash is used by random generators. There may be hash collisions but
        // this is intentional as it's a very easy way to make the current
// random generation process almost different for many test configurations,
@@ -374,6 +383,7 @@ protected:
bool _in_place{ false };
bool _run_twice{ false };
bool _use_dynamic_output_quant{false};
+ bool _skip_test{false};
int32_t _hash{0};
// Random initialization limits
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 4eb25a5bc5..165cd423df 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DEQUANTIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DEQUANTIZATIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class DequantizationValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype, DataLayout data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (src_data_type == DataType::F16 || dst_datatype == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_quantization_info = generate_quantization_info(src_data_type, shape.z());
_target = compute_target(shape, src_data_type, dst_datatype, data_layout);
_reference = compute_reference(shape, src_data_type);
@@ -164,4 +170,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DEQUANTIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/DirectConvolution3DFixture.h b/tests/validation/fixtures/DirectConvolution3DFixture.h
index e27a41a23b..fb8db15a47 100644
--- a/tests/validation/fixtures/DirectConvolution3DFixture.h
+++ b/tests/validation/fixtures/DirectConvolution3DFixture.h
@@ -54,6 +54,12 @@ public:
{
ARM_COMPUTE_ERROR_ON(data_layout != DataLayout::NDHWC);
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const TensorShape weights_shape(num_kernels, input_shape[0], kernel_width, kernel_height, kernel_depth);
const TensorShape bias_shape(num_kernels);
const DataType bias_data_type = is_data_type_quantized(data_type) ? DataType::S32 : data_type;
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 6f204642ca..debfce9142 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -93,6 +93,12 @@ public:
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
// This hash is used by random generators. There may be hash collisions but
        // this is intentional as it's a very easy way to make the current
// random generation process almost different for many test configurations,
@@ -133,6 +139,12 @@ public:
ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN);
ARM_COMPUTE_UNUSED(dilation);
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
// This hash is used by random generators. There may be hash collisions but
        // this is intentional as it's a very easy way to make the current
// random generation process almost different for many test configurations,
diff --git a/tests/validation/fixtures/ElementwiseOperationsFixture.h b/tests/validation/fixtures/ElementwiseOperationsFixture.h
index f36a1f75b7..0c809b001b 100644
--- a/tests/validation/fixtures/ElementwiseOperationsFixture.h
+++ b/tests/validation/fixtures/ElementwiseOperationsFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,6 +50,13 @@ public:
DataType data_type0, DataType data_type1, DataType output_data_type,
QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out, bool is_inplace = false, bool use_dynamic_shape = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type0 == DataType::F16 || data_type1 == DataType::F16 || output_data_type == DataType::F16) &&
+ !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_op = op;
_use_dynamic_shape = use_dynamic_shape;
_is_inplace = is_inplace;
diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h
index 15344288db..70f6ea9172 100644
--- a/tests/validation/fixtures/ElementwiseUnaryFixture.h
+++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
-#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEUNARYFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEUNARYFIXTURE_H
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorShape.h"
@@ -53,6 +53,12 @@ public:
void setup(TensorShape input_shape, DataType input_data_type, bool in_place, ElementWiseUnary op,
bool use_dynamic_shape = false, QuantizationInfo qinfo = QuantizationInfo(), QuantizationInfo qinfo_out = QuantizationInfo())
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ input_data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_op = op;
_target = compute_target(input_shape, input_data_type, in_place, qinfo, qinfo_out);
_reference = compute_reference(input_shape, input_data_type, qinfo, qinfo_out);
@@ -444,4 +450,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ELEMENTWISEUNARYFIXTURE_H
diff --git a/tests/validation/fixtures/FlattenLayerFixture.h b/tests/validation/fixtures/FlattenLayerFixture.h
index e72487c7cf..ee48e1d6cf 100644
--- a/tests/validation/fixtures/FlattenLayerFixture.h
+++ b/tests/validation/fixtures/FlattenLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_FLATTEN_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_FLATTEN_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_FLATTENLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_FLATTENLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -52,6 +52,12 @@ class FlattenLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
TensorShape shape_flatten;
TensorInfo input_info(shape, 1, data_type);
shape_flatten = compute_flatten_shape(&input_info);
@@ -118,4 +124,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FLATTEN_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_FLATTENLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/FloorFixture.h b/tests/validation/fixtures/FloorFixture.h
index 7d38666f47..5cbf2b8e9c 100644
--- a/tests/validation/fixtures/FloorFixture.h
+++ b/tests/validation/fixtures/FloorFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_FLOOR_FIXTURE
-#define ARM_COMPUTE_TEST_FLOOR_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_FLOORFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_FLOORFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class FloorValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type);
_reference = compute_reference(shape, data_type);
}
@@ -103,4 +109,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FLOOR_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_FLOORFIXTURE_H
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 344187868f..481a3b7659 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -92,6 +92,12 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
@@ -459,6 +465,12 @@ public:
void setup(TensorShape src_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape dst_shape,
DataType data_type, ActivationLayerInfo activation_info, bool constant_weights, bool constant_bias, bool weights_reshaped, bool remove_bias = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+           data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_data_type = data_type;
const bool is_quantized = is_data_type_quantized(data_type);
diff --git a/tests/validation/fixtures/FuseBatchNormalizationFixture.h b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
index a05e4169a7..61affff6ba 100644
--- a/tests/validation/fixtures/FuseBatchNormalizationFixture.h
+++ b/tests/validation/fixtures/FuseBatchNormalizationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE
-#define ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_FUSEBATCHNORMALIZATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_FUSEBATCHNORMALIZATIONFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class FuseBatchNormalizationFixture : public framework::Fixture
public:
void setup(TensorShape shape_w, DataType data_type, DataLayout data_layout, bool in_place, bool with_bias, bool with_gamma, bool with_beta)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
std::tie(_target_w, _target_b) = compute_target(shape_w, data_type, data_layout, in_place, with_bias, with_gamma, with_beta);
std::tie(_reference_w, _reference_b) = compute_reference(shape_w, data_type, with_bias, with_gamma, with_beta);
}
@@ -202,4 +208,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_FUSEBATCHNORMALIZATION_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_FUSEBATCHNORMALIZATIONFIXTURE_H
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 94bedc83e1..34c0574412 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -51,6 +51,12 @@ class GEMMGenericValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape output_shape, float alpha, float beta, bool pretranspose, DataType data_type, bool accumulate=false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
ARM_COMPUTE_UNUSED(pretranspose);
_target = compute_target(shape_a, shape_b, shape_c, output_shape, alpha, beta, data_type, accumulate);
_reference = compute_reference(shape_a, shape_b, output_shape, alpha, beta, data_type, accumulate);
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index aa4eedb75d..854442b174 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -25,6 +25,7 @@
#define ACL_TESTS_VALIDATION_FIXTURES_GEMMLOWPFIXTURE_H
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "src/core/utils/quantization/AsymmHelpers.h"
#include "tests/validation/Helpers.h"
#include "tests/framework/Fixture.h"
@@ -91,18 +92,113 @@ struct TensorFillInfo
};
template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false, bool run_twice = false>
-TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
+TensorType compute_gemmlowp_target_for_updated_sq_info_after_config(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
const QuantizationInfo& output_qinfo, DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo(),
- bool accumulate = false, bool dynamic_qinfo = false, DataType data_type_output = DataType::UNKNOWN)
+ bool accumulate = false, DataType data_type_output = DataType::UNKNOWN)
{
+    ARM_COMPUTE_ASSERT((std::is_same<FunctionType, NEGEMMLowpMatrixMultiplyCore>::value));
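+    // The assert above guards the reinterpret_cast to NEGEMMLowpMatrixMultiplyCore performed after configuration.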
ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
ARM_COMPUTE_ASSERT(data_type_a == data_type_b);
+
// If unknown, set to sensible defaults
if (data_type_output == DataType::UNKNOWN) {
data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;
}
+    // Create tensors with placeholder quantization info; the correct values are set and propagated after configuration.
+ auto qi = QuantizationInfo(0.550721, -37, true);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1, qi);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1, qi);
+ TensorType output = create_tensor<TensorType>(shape_output, data_type_output, 1, qi);
+
+ TensorType bias;
+ if(is_fused)
+ {
+ TensorShape bias_shape(shape_b[0]);
+        bias = create_tensor<TensorType>(bias_shape, data_type_output == DataType::F32 ? DataType::F32 : DataType::S32, 1);
+ }
+
+ // Create and configure function
+ // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
+ FunctionType gemmlowp;
+
+ gemmlowp.configure(&a, &b, is_fused ? &bias : nullptr, &output, GEMMInfo(false, false, reshape_b_only_on_first_run, (reinterpret_output_as_3d ? shape_output[2] : 0), reinterpret_input_as_3d, false,
+ output_stage, false /*fp_mixed_precision*/, false /*fast_math*/, false /*broadcast_bias*/,
+ arm_compute::ActivationLayerInfo(), false /* fixed_format */, arm_compute::WeightFormat::UNSPECIFIED,
+ false /* pretranspose_B */, accumulate));
+
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(output.info()->is_resizable());
+
+ add_padding_x({ &a, &b, &output });
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ output.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!output.info()->is_resizable());
+
+ // Fill tensors
+ fill_quantized(AccessorType(a), 0 + finfo.hash);
+ fill_quantized(AccessorType(b), 1 + finfo.hash);
+
+ if (accumulate)
+ {
+ ARM_COMPUTE_ASSERT(accumulate != run_twice);
+ fill(AccessorType(output), 6 + finfo.hash, finfo.min_output, finfo.max_output);
+ }
+
+ if(is_fused)
+ {
+ ARM_COMPUTE_ASSERT(bias.info()->is_resizable());
+ bias.allocator()->allocate();
+ ARM_COMPUTE_ASSERT(!bias.info()->is_resizable());
+ fill(AccessorType(bias), 2 + finfo.hash, finfo.min_bias, finfo.max_bias);
+ }
+
+ // Run with variable inputs.
+ if(run_twice)
+ {
+ gemmlowp.run();
+ fill_quantized(AccessorType(a), 3 + finfo.hash); // Fill tensors with new seed after run
+ fill_quantized(AccessorType(b), 4 + finfo.hash);
+ if(is_fused)
+ {
+ fill(AccessorType(bias), 5 + finfo.hash, finfo.min_bias, finfo.max_bias);
+ }
+ }
+
+    // Now set the correct quantization info on the tensors and update ACL
+ a.info()->set_quantization_info(QuantizationInfo(a_qinfo.scale(), a_qinfo.offset(), true));
+ b.info()->set_quantization_info(QuantizationInfo(b_qinfo.scale(), b_qinfo.offset(), true));
+ output.info()->set_quantization_info(QuantizationInfo(output_qinfo.scale(), output_qinfo.offset(), true));
+
+    // Propagate the correct quantization info through ACL
+ NEGEMMLowpMatrixMultiplyCore *lp = reinterpret_cast<NEGEMMLowpMatrixMultiplyCore *>(&gemmlowp);
+ lp->update_quantization_parameters();
+
+ // Compute GEMM function
+ gemmlowp.run();
+ return output;
+}
+
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d, bool reinterpret_output_as_3d, typename OutputType, bool is_fused = false, bool run_twice = false>
+TensorType compute_gemmlowp_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
+ const QuantizationInfo& output_qinfo, DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8,
+ GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo(), bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo(),
+ bool accumulate = false, bool dynamic_qinfo = false, DataType data_type_output = DataType::UNKNOWN)
+{
+ ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
+ // If unknown, set to sensible defaults
+ if (data_type_output == DataType::UNKNOWN) {
+ data_type_output = output_stage.type == GEMMLowpOutputStageType::NONE ? DataType::S32 : data_type_a;
+ }
+
// Create tensors
TensorType a = create_tensor<TensorType>(shape_a, data_type_a, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : a_qinfo);
+    TensorType b = create_tensor<TensorType>(shape_b, data_type_b, 1, dynamic_qinfo ? QuantizationInfo(1.0,0,true) : b_qinfo); // GEMM output before the output stage mismatches if data_layout_output is passed here; to be investigated
@@ -185,7 +281,6 @@ SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, con
DataType data_type_a = DataType::QASYMM8, DataType data_type_b = DataType::QASYMM8, const TensorFillInfo& finfo = TensorFillInfo())
{
ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type_a));
- ARM_COMPUTE_ASSERT(data_type_a == data_type_b);
TensorShape shape_a_to_use = shape_a;
if(reinterpret_input_as_3d)
{
@@ -412,7 +507,7 @@ public:
*
*/
void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, GEMMLowpOutputStageType output_stage_type, DataType data_type,
- bool reshape_b_only_on_first_run)
+ bool reshape_b_only_on_first_run, bool updated_sq_info_after_config = false)
{
ARM_COMPUTE_ASSERT(output_stage_type != GEMMLowpOutputStageType::NONE);
ARM_COMPUTE_ASSERT(is_data_type_quantized_asymmetric(data_type));
@@ -429,15 +524,23 @@ public:
init_gemmlowp_output_stage_info(data_type, a_qinfo, b_qinfo, output_qinfo, output_stage_type, output_stage);
_reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type, data_type, output_stage, finfo);
- _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, data_type, data_type, output_stage, reshape_b_only_on_first_run, finfo);
+ _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, data_type, data_type, output_stage, reshape_b_only_on_first_run, finfo, updated_sq_info_after_config);
}
protected:
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const QuantizationInfo& output_qinfo,
- DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo())
+ DataType data_type_a, DataType data_type_b, const GEMMLowpOutputStageInfo& output_stage, bool reshape_b_only_on_first_run = false, const TensorFillInfo& finfo = TensorFillInfo(), bool updated_sq_info_after_config = false)
{
- return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true, run_twice>(shape_a, shape_b, shape_output, a_qinfo,
- b_qinfo, output_qinfo, data_type_a, data_type_b, output_stage, reshape_b_only_on_first_run, finfo);
+ if (updated_sq_info_after_config)
+ {
+ return compute_gemmlowp_target_for_updated_sq_info_after_config<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true, run_twice>(shape_a, shape_b, shape_output, a_qinfo,
+ b_qinfo, output_qinfo, data_type_a, data_type_b, output_stage, reshape_b_only_on_first_run, finfo);
+ }
+ else
+ {
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, qasymm8_t, true, run_twice>(shape_a, shape_b, shape_output, a_qinfo,
+ b_qinfo, output_qinfo, data_type_a, data_type_b, output_stage, reshape_b_only_on_first_run, finfo);
+ }
}
SimpleTensor<TI> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo,
@@ -472,29 +575,59 @@ template <typename TensorType, typename AccessorType, typename FunctionType, boo
class GEMMLowpDequantizedMatrixMultiplyValidationFixture : public framework::Fixture
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, DataType data_type_a, DataType data_type_b, bool accumulate)
{
const bool dynamic_qinfo = false;
const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset);
const auto b_qinfo = QuantizationInfo(5.0f / 255, b_offset);
TensorFillInfo finfo;
- _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo);
- _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo);
+ _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo,
+ accumulate, dynamic_qinfo);
+ _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b,
+ finfo, accumulate, dynamic_qinfo);
}
protected:
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo)
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, DataType data_type_a, DataType data_type_b, const TensorFillInfo& finfo, const bool accumulate, const bool dynamic_qinfo)
{
const auto output_qinfo = QuantizationInfo();
- return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo, DataType::F32);
+ return compute_gemmlowp_target<TensorType, AccessorType, FunctionType, reinterpret_input_as_3d, reinterpret_output_as_3d, int32_t, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, data_type_a, data_type_b, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo, DataType::F32);
}
- SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate, const bool dynamic_qinfo)
+ SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, DataType data_type_a, DataType data_type_b, const TensorFillInfo& finfo, bool accumulate, const bool dynamic_qinfo)
{
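+        // The effective scale of the S32 reference output is the product of the input scales.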
QuantizationInfo s32_ref_output_quant_info = QuantizationInfo(a_qinfo.uniform().scale * b_qinfo.uniform().scale, 0, dynamic_qinfo);
- SimpleTensor<int32_t> s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, int8_t, int8_t, false, false, run_twice>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo,
- DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, finfo);
+ SimpleTensor<int32_t> s32_ref_output;
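+        // Dispatch the reference on the (A, B) data-type pair; the mixed pair is only supported as QASYMM8 inputs with QASYMM8_SIGNED weights.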
+ if (data_type_a == DataType::QASYMM8)
+ {
+ if (data_type_b == DataType::QASYMM8)
+ {
+ s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, uint8_t, uint8_t, false, false, run_twice>(
+ shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON(data_type_b != DataType::QASYMM8_SIGNED);
+ s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, uint8_t, int8_t, false, false, run_twice>(
+ shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON(data_type_a != DataType::QASYMM8_SIGNED);
+ if (data_type_b == DataType::QASYMM8)
+ {
+ ARM_COMPUTE_ERROR("QASYMM8_SIGNED input with QASYMM8 weights not supported");
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR_ON(data_type_b != DataType::QASYMM8_SIGNED);
+ s32_ref_output = compute_gemmlowp_reference<reinterpret_input_as_3d, int8_t, int8_t, false, false, run_twice>(
+ shape_a, shape_b, shape_output, a_qinfo, b_qinfo, data_type_a, data_type_b, finfo);
+ }
+ }
+
s32_ref_output.quantization_info(s32_ref_output_quant_info);
SimpleTensor<float> f32_ref_output(s32_ref_output.shape(), DataType::F32);
diff --git a/tests/validation/fixtures/Im2ColFixture.h b/tests/validation/fixtures/Im2ColFixture.h
index 5c7978f4ab..8fb53c35b4 100644
--- a/tests/validation/fixtures/Im2ColFixture.h
+++ b/tests/validation/fixtures/Im2ColFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_IM2COL_FIXTURE
-#define ARM_COMPUTE_TEST_IM2COL_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_IM2COLFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_IM2COLFIXTURE_H
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
@@ -51,6 +51,12 @@ public:
void setup(TensorShape input_shape, DataType data_type, const Size2D &kernel_dims, const PadStrideInfo &conv_info, const QuantizationInfo &quant_info, const DataLayout &data_layout,
unsigned int num_groups)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_kernel_dims = kernel_dims;
_conv_info = conv_info;
_quant_info = quant_info;
@@ -136,4 +142,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_IM2COL_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_IM2COLFIXTURE_H
diff --git a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
index c26dd99f02..b78b742e09 100644
--- a/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/InstanceNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE
-#define ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_INSTANCENORMALIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_INSTANCENORMALIZATIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class InstanceNormalizationLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, bool in_place)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type, data_layout, in_place);
_reference = compute_reference(shape, data_type);
}
@@ -146,4 +152,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_INSTANCENORMALIZATION_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_INSTANCENORMALIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/L2NormalizeLayerFixture.h b/tests/validation/fixtures/L2NormalizeLayerFixture.h
index b8f4b1eaf3..9e65f1eaa5 100644
--- a/tests/validation/fixtures/L2NormalizeLayerFixture.h
+++ b/tests/validation/fixtures/L2NormalizeLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_L2NORMALIZE_FIXTURE
-#define ARM_COMPUTE_TEST_L2NORMALIZE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_L2NORMALIZELAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_L2NORMALIZELAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -50,6 +50,12 @@ class L2NormalizeLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, DataLayout data_layout, int axis, float epsilon)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type, data_layout, axis, epsilon);
_reference = compute_reference(shape, data_type, data_layout, axis, epsilon);
}
@@ -134,4 +140,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_L2NORMALIZE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_L2NORMALIZELAYERFIXTURE_H
diff --git a/tests/validation/fixtures/LSTMLayerFixture.h b/tests/validation/fixtures/LSTMLayerFixture.h
index a32e9adfe5..fa7c7d1d90 100644
--- a/tests/validation/fixtures/LSTMLayerFixture.h
+++ b/tests/validation/fixtures/LSTMLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2023 Arm Limited.
+ * Copyright (c) 2018-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_LSTMLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_LSTMLAYERFIXTURE_H
+
+#include "arm_compute/runtime/Allocator.h"
+#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include "arm_compute/runtime/PoolManager.h"
#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
@@ -48,10 +54,16 @@ class LSTMLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, TensorShape input_weights_shape, TensorShape recurrent_weights_shape, TensorShape cell_bias_shape, TensorShape output_cell_shape, TensorShape output_shape,
TensorShape scratch_shape, ActivationLayerInfo info, float cell_threshold, float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt,
- bool use_layer_norm)
+ bool use_layer_norm, bool use_memory_manager)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(input_shape, input_weights_shape, recurrent_weights_shape, cell_bias_shape, output_cell_shape, output_shape, scratch_shape, info, cell_threshold, projection_threshold,
- data_type, projection_opt, peephole_opt, use_layer_norm);
+ data_type, projection_opt, peephole_opt, use_layer_norm, use_memory_manager);
_reference = compute_reference(input_shape, input_weights_shape, recurrent_weights_shape, cell_bias_shape, output_cell_shape, output_shape, scratch_shape, info, cell_threshold, projection_threshold,
data_type, projection_opt, peephole_opt, use_layer_norm);
}
@@ -77,7 +89,7 @@ protected:
}
TensorType compute_target(const TensorShape &input_shape, const TensorShape &input_weights_shape, const TensorShape &recurrent_weights_shape, const TensorShape &cell_bias_shape,
const TensorShape &output_cell_shape, const TensorShape &output_shape, const TensorShape &scratch_shape, ActivationLayerInfo info, float cell_threshold,
- float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt, bool use_layer_norm)
+ float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt, bool use_layer_norm, bool use_memory_manager)
{
const unsigned int num_cells = input_weights_shape.y();
const unsigned int num_outputs = recurrent_weights_shape.x();
@@ -159,7 +171,17 @@ protected:
}
// Create and configure function
- FunctionType lstm;
+ std::shared_ptr<MemoryManagerOnDemand> mm = nullptr;
+
+ if(use_memory_manager)
+ {
+ auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
+ auto pool_mgr = std::make_shared<PoolManager>();
+ mm = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
+ }
+
+ FunctionType lstm(mm);
+
lstm.configure(&input, &input_to_forget_w, &input_to_cell_w, &input_to_output_w, &recurrent_to_forget_w,
&recurrent_to_cell_w, &recurrent_to_output_w, &forget_gate_bias, &cell_bias, &output_gate_bias,
&output_state_in, &cell_state_in,
@@ -314,8 +336,27 @@ protected:
}
// Compute function
+ if(use_memory_manager)
+ {
+ if(std::is_same<TensorType, Tensor>::value)
+ {
+ Allocator alloc{};
+ mm->populate(alloc, 1);
+ }
+ else
+ {
+ CLBufferAllocator alloc{};
+ mm->populate(alloc, 1);
+ }
+ }
+
lstm.run();
+ if(use_memory_manager)
+ {
+ mm->clear();
+ }
+
_target_scratch = std::move(scratch);
return output;
}
@@ -535,4 +576,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_LSTMLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
index ffd12e56d0..4ccd2b8266 100644
--- a/tests/validation/fixtures/MatMulFixture.h
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -65,6 +65,12 @@ public:
QuantizationInfo b_qinfo = QuantizationInfo(),
QuantizationInfo o_qinfo = QuantizationInfo())
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
// For brevity, the input shapes are assumed to be not-transposed for both a and b matrices.
if (transpose_a)
{
diff --git a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
index 808e3ffabd..1fd2049272 100644
--- a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
+++ b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, 2023 Arm Limited.
+ * Copyright (c) 2020-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_MAXUNPOOLINGLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_MAXUNPOOLINGLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class MaxUnpoolingLayerValidationGenericFixture : public framework::Fixture
public:
void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
std::mt19937 gen(library->seed());
std::uniform_int_distribution<> offset_dis(0, 20);
const float scale = data_type == DataType::QASYMM8_SIGNED ? 1.f / 127.f : 1.f / 255.f;
@@ -159,4 +165,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_MAXUNPOOLINGLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
index bf5d20790c..f8176e82ae 100644
--- a/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/MeanStdDevNormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023 Arm Limited.
+ * Copyright (c) 2019-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_MEAN_STDDEV_NORMALIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_MEAN_STDDEV_NORMALIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_MEANSTDDEVNORMALIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_MEANSTDDEVNORMALIZATIONLAYERFIXTURE_H
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class MeanStdDevNormalizationLayerValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType dt, bool in_place, float epsilon = 1e-8)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ dt == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
QuantizationInfo qi = QuantizationInfo(0.5f, 10);
_data_type = dt;
_target = compute_target(shape, dt, in_place, epsilon, qi);
@@ -128,4 +134,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_MEAN_STDDEV_NORMALIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_MEANSTDDEVNORMALIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/NormalizationLayerFixture.h b/tests/validation/fixtures/NormalizationLayerFixture.h
index ddaa3533f5..06ec88b5af 100644
--- a/tests/validation/fixtures/NormalizationLayerFixture.h
+++ b/tests/validation/fixtures/NormalizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_NORMALIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_NORMALIZATIONLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class NormalizationValidationGenericFixture : public framework::Fixture
public:
void setup(TensorShape shape, NormType norm_type, int norm_size, float beta, bool is_scaled, DataType data_type, DataLayout data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
NormalizationLayerInfo info(norm_type, norm_size, 5, beta, 1.f, is_scaled);

_target = compute_target(shape, info, data_type, data_layout);
@@ -126,4 +132,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_NORMALIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_NORMALIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h
index 93b43616ff..10c7cdab82 100644
--- a/tests/validation/fixtures/PadLayerFixture.h
+++ b/tests/validation/fixtures/PadLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_PADLAYER_FIXTURE
-#define ARM_COMPUTE_TEST_PADLAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_PADLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_PADLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -46,6 +46,12 @@ class PaddingFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, const PaddingList &padding, const PaddingMode mode)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
PaddingList clamped_padding = padding;
if(mode != PaddingMode::CONSTANT)
{
@@ -132,4 +138,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_PADLAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_PADLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
index 4345d8a13f..213e7355a5 100644
--- a/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
+++ b/tests/validation/fixtures/PixelWiseMultiplicationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE
-#define ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_PIXELWISEMULTIPLICATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_PIXELWISEMULTIPLICATIONFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -58,6 +58,13 @@ public:
ActivationLayerInfo act_info,
bool is_inplace)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (dt_in1 == DataType::F16 || dt_in2 == DataType::F16 || dt_out == DataType::F16) &&
+ !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_is_inplace = is_inplace;
_target = compute_target(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
_reference = compute_reference(shape0, shape1, dt_in1, dt_in2, dt_out, scale, convert_policy, rounding_policy, qinfo0, qinfo1, qinfo_out, act_info);
@@ -233,4 +240,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_PIXEL_WISE_MULTIPLICATION_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_PIXELWISEMULTIPLICATIONFIXTURE_H
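Note: operators whose inputs and output carry independent data types have to widen the check, as the PixelWiseMultiplication hunk above does for dt_in1, dt_in2 and dt_out (and the QuantizationLayer hunk below does for its input/output pair). A variadic form of the hypothetical helper sketched earlier would cover both shapes of the guard; this assumes C++17 fold expressions and is illustrative only:

    // Hypothetical variadic helper: skip when any participating data type is
    // F16 and the CPU backend lacks FP16 support.
    template <typename TensorType, typename... DTs>
    bool any_fp16_unsupported_on_cpu(DTs... dts)
    {
        const bool any_f16 = ((dts == arm_compute::DataType::F16) || ...);
        return std::is_same<TensorType, arm_compute::Tensor>::value && // Cpu
               any_f16 && !arm_compute::CPUInfo::get().has_fp16();
    }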
diff --git a/tests/validation/fixtures/Pooling3dLayerFixture.h b/tests/validation/fixtures/Pooling3dLayerFixture.h
index 1bdf615fb1..1d9ee58df9 100644
--- a/tests/validation/fixtures/Pooling3dLayerFixture.h
+++ b/tests/validation/fixtures/Pooling3dLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_POOLING3DLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_POOLING3DLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class Pooling3dLayerValidationGenericFixture : public framework::Fixture
public:
void setup(TensorShape shape, Pooling3dLayerInfo pool_info, DataType data_type, QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, pool_info, data_type, input_qinfo, output_qinfo);
_reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo);
}
@@ -161,4 +167,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_POOLING_3D_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_POOLING3DLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 59c920868b..c7265a0e8a 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_POOLINGLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_POOLINGLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ public:
void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false,
QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_mixed_layout = mixed_layout;
_pool_info = pool_info;
_target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
@@ -225,4 +231,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_POOLINGLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 1b21967bda..1cc0a56399 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class QuantizationValidationGenericFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ (data_type_in == DataType::F16 || data_type_out == DataType::F16) && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in);
_reference = compute_reference(shape, data_type_in, data_type_out, qinfo, qinfo_in);
}
@@ -116,4 +122,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_QUANTIZATION_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_QUANTIZATIONLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/RNNLayerFixture.h b/tests/validation/fixtures/RNNLayerFixture.h
index e9a05e7838..8741ef4fae 100644
--- a/tests/validation/fixtures/RNNLayerFixture.h
+++ b/tests/validation/fixtures/RNNLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H

#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
@@ -45,6 +45,12 @@ public:
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape recurrent_weights_shape, TensorShape bias_shape, TensorShape output_shape, ActivationLayerInfo info,
DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(input_shape, weights_shape, recurrent_weights_shape, bias_shape, output_shape, info, data_type);
_reference = compute_reference(input_shape, weights_shape, recurrent_weights_shape, bias_shape, output_shape, info, data_type);
}
@@ -144,4 +150,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_RNN_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_RNNLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/ROIAlignLayerFixture.h b/tests/validation/fixtures/ROIAlignLayerFixture.h
index ad76dcbbd9..fd076862dd 100644
--- a/tests/validation/fixtures/ROIAlignLayerFixture.h
+++ b/tests/validation/fixtures/ROIAlignLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE
-#define ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class ROIAlignLayerGenericFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_rois_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::QASYMM16 : data_type;
_target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo);
_reference = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo);
@@ -209,4 +215,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ROIALIGNLAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_ROIALIGNLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/RangeFixture.h b/tests/validation/fixtures/RangeFixture.h
index 166613a318..50682e979e 100644
--- a/tests/validation/fixtures/RangeFixture.h
+++ b/tests/validation/fixtures/RangeFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_RANGE_FIXTURE
-#define ARM_COMPUTE_TEST_RANGE_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -57,6 +57,12 @@ class RangeFixture : public framework::Fixture
public:
void setup(const DataType data_type0, float start, float step, const QuantizationInfo qinfo0 = QuantizationInfo())
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type0 == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(data_type0, qinfo0, start, step);
_reference = compute_reference(data_type0, qinfo0, start, step);
}
@@ -138,4 +144,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_RANGE_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_RANGEFIXTURE_H
diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h
index e61941435c..9f18497095 100644
--- a/tests/validation/fixtures/ReduceMeanFixture.h
+++ b/tests/validation/fixtures/ReduceMeanFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
-#define ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class ReduceMeanValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info_input, QuantizationInfo quantization_info_output)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
_reference = compute_reference(shape, data_type, axis, keep_dims, quantization_info_input, quantization_info_output);
}
@@ -172,4 +178,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_REDUCEMEANFIXTURE_H
diff --git a/tests/validation/fixtures/ReductionOperationFixture.h b/tests/validation/fixtures/ReductionOperationFixture.h
index b44f299486..fc9801d9fd 100644
--- a/tests/validation/fixtures/ReductionOperationFixture.h
+++ b/tests/validation/fixtures/ReductionOperationFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_REDUCTION_OPERATION_FIXTURE
-#define ARM_COMPUTE_TEST_REDUCTION_OPERATION_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_REDUCTIONOPERATIONFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_REDUCTIONOPERATIONFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class ReductionOperationValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info, bool keep_dims = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
_keep_dims = keep_dims && !is_arg_min_max;

@@ -166,4 +172,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_REDUCTION_OPERATION_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_REDUCTIONOPERATIONFIXTURE_H
diff --git a/tests/validation/fixtures/ReverseFixture.h b/tests/validation/fixtures/ReverseFixture.h
index 856bff7b12..5bb8f876d2 100644
--- a/tests/validation/fixtures/ReverseFixture.h
+++ b/tests/validation/fixtures/ReverseFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,6 +47,12 @@ class ReverseValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, TensorShape axis_shape, DataType data_type, bool use_negative_axis = false, bool use_inverted_axis = false)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_num_dims = shape.num_dimensions();
_target = compute_target(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
_reference = compute_reference(shape, axis_shape, data_type, use_negative_axis, use_inverted_axis);
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index 86d89d71f7..03a7ca6ab3 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,6 +42,12 @@ public:
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
bool align_corners, bool mixed_layout, QuantizationInfo output_quantization_info)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_shape = shape;
_policy = policy;
_border_mode = border_mode;
diff --git a/tests/validation/fixtures/ScatterLayerFixture.h b/tests/validation/fixtures/ScatterLayerFixture.h
index af161ef98b..34c6023b09 100644
--- a/tests/validation/fixtures/ScatterLayerFixture.h
+++ b/tests/validation/fixtures/ScatterLayerFixture.h
@@ -25,7 +25,6 @@
#define ACL_TESTS_VALIDATION_FIXTURES_SCATTERLAYERFIXTURE_H

#include "arm_compute/core/Utils.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
diff --git a/tests/validation/fixtures/SelectFixture.h b/tests/validation/fixtures/SelectFixture.h
index 8cb6f062f9..eef86b808e 100644
--- a/tests/validation/fixtures/SelectFixture.h
+++ b/tests/validation/fixtures/SelectFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SELECT_FIXTURE
-#define ARM_COMPUTE_TEST_SELECT_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -65,6 +65,12 @@ class SelectValidationFixture : public framework::Fixture
public:
void setup(TensorShape shape, bool has_same_same_rank, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
TensorShape condition_shape = detail::select_condition_shape(shape, has_same_same_rank);

_target = compute_target(shape, condition_shape, data_type);
@@ -144,4 +150,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SELECT_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SELECTFIXTURE_H
diff --git a/tests/validation/fixtures/SliceOperationsFixtures.h b/tests/validation/fixtures/SliceOperationsFixtures.h
index b1f91ea2e0..65b8fb88d2 100644
--- a/tests/validation/fixtures/SliceOperationsFixtures.h
+++ b/tests/validation/fixtures/SliceOperationsFixtures.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
-#define ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -47,6 +47,12 @@ class SliceFixture : public framework::Fixture
public:
void setup(TensorShape shape, Coordinates starts, Coordinates ends, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, starts, ends, data_type);
_reference = compute_reference(shape, starts, ends, data_type);
}
@@ -112,6 +118,12 @@ public:
int32_t begin_mask, int32_t end_mask, int32_t shrink_mask,
DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, starts, ends, strides, begin_mask, end_mask, shrink_mask, data_type);
_reference = compute_reference(shape, starts, ends, strides, begin_mask, end_mask, shrink_mask, data_type);
}
@@ -176,4 +188,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SLICE_OPERATIONS_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SLICEOPERATIONSFIXTURES_H
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index f4bf8df9c0..399a8b70c4 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
-#define ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -48,6 +48,12 @@ class SoftmaxValidationGenericFixture : public framework::Fixture
public:
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, float beta, size_t axis)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_quantization_info = quantization_info;

_reference = compute_reference(shape, data_type, quantization_info, beta, axis);
@@ -157,4 +163,4 @@ public:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SOFTMAXLAYERFIXTURE_H
diff --git a/tests/validation/fixtures/SplitFixture.h b/tests/validation/fixtures/SplitFixture.h
index 203925329c..79ce152671 100644
--- a/tests/validation/fixtures/SplitFixture.h
+++ b/tests/validation/fixtures/SplitFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_SPLIT_FIXTURE
-#define ARM_COMPUTE_TEST_SPLIT_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class SplitFixture : public framework::Fixture
public:
void setup(TensorShape shape, unsigned int axis, unsigned int splits, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, axis, splits, data_type);
_reference = compute_reference(shape, axis, splits, data_type);
}
@@ -150,6 +156,12 @@ class SplitShapesFixture : public framework::Fixture
public:
void setup(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(shape, axis, split_shapes, data_type);
_reference = compute_reference(shape, axis, split_shapes, data_type);
}
@@ -254,4 +266,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SPLIT_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_SPLITFIXTURE_H
diff --git a/tests/validation/fixtures/UnstackFixture.h b/tests/validation/fixtures/UnstackFixture.h
index 30b7dd5539..b543ea263c 100644
--- a/tests/validation/fixtures/UnstackFixture.h
+++ b/tests/validation/fixtures/UnstackFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, 2023 Arm Limited.
+ * Copyright (c) 2018-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_UNSTACK_FIXTURE
-#define ARM_COMPUTE_TEST_UNSTACK_FIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -49,6 +49,12 @@ class UnstackValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, int axis, int num, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(input_shape, axis, num, data_type);
_reference = compute_reference(input_shape, axis, num, data_type);
}
@@ -114,4 +120,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_UNSTACK_FIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_UNSTACKFIXTURE_H
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 20b678b36c..4d165a6563 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -59,6 +59,12 @@ public:
DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
ARM_COMPUTE_UNUSED(dilation);
_mixed_layout = mixed_layout;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
@@ -244,6 +250,12 @@ class WinogradInputTransformValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
_mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
@@ -355,6 +367,12 @@ class WinogradFilterTransformValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);

@@ -469,6 +487,12 @@ class WinogradOutputTransformValidationFixture : public framework::Fixture
public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
{
+ if(std::is_same<TensorType, Tensor>::value && // Cpu
+ data_type == DataType::F16 && !CPUInfo::get().has_fp16())
+ {
+ return;
+ }
+
_target = compute_target(input_shape, winograd_info, data_type, act_info);
_reference = compute_reference(input_shape, winograd_info, data_type, act_info);
}
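Note: the four Winograd fixtures above (convolution layer, input, filter and output transform) each repeat the guard because their setup() methods do not share a base implementation. With the hypothetical helper sketched after the MatMulFixture hunk, each setup() would reduce to a single early exit; the signature below copies the output-transform fixture:

    // Usage sketch inside a fixture's setup(), assuming the hypothetical
    // fp16_unsupported_on_cpu helper from the earlier note.
    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type, ActivationLayerInfo act_info = ActivationLayerInfo())
    {
        if(fp16_unsupported_on_cpu<TensorType>(data_type))
        {
            return; // skip: CPU target without FP16 support
        }
        _target    = compute_target(input_shape, winograd_info, data_type, act_info);
        _reference = compute_reference(input_shape, winograd_info, data_type, act_info);
    }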
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h
index 4c1cc94d3d..0ab90c675f 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/MatMulKernelFixture.h
@@ -255,7 +255,6 @@ protected:
// We reshape the gemm output back if the tensor is high dimensional
if (output_shape_collapsed != output_shape)
{
- // std::cout << "called reshape: \n";
result = reference::reshape_layer(result, output_shape);
}