author     Renato Arantes <renato.arantes@arm.com>          2024-01-26 17:31:18 +0000
committer  Renato Barros Arantes <renato.arantes@arm.com>   2024-03-21 11:15:30 +0000
commit     36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 (patch)
tree       0701d615ef30444b9d0789db691b59b81fd9e86e /tests
parent     d2191150736dde66d79eb97e0c8ee506eef3c8fc (diff)
download   ComputeLibrary-36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38.tar.gz
[ONCPUML-1451] Add matmul kernel to enable bf16 to bf16 operations via PyTorch® autocast() function
The full range of tests must be added with the [MLINFSW-482] epic due to the lack of reordering kernels implemented in ACL.

Co-Authored-By: David Mansell <David.Mansell@arm.com>
Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36
Signed-off-by: Renato Arantes <renato.arantes@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
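For orientation, below is a minimal usage sketch (not part of the patch) of how the NEMatMul fast-math path exercised by the new BF16 tests can be driven through the public API. It uses only classes and calls that appear in the diff (NEMatMul, MatMulInfo, CpuMatMulSettings); the 2x2 shapes mirror the TinyMatMulDataset case, and the fixed-format weight reordering that the new fixture performs is deliberately omitted, so treat this as an illustrative sketch rather than the patch's own test flow.

// Illustrative sketch only (not part of this patch).
// Drives the fast-math MatMul path that the BF16 test suite below relies on:
// "MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting".
// The new bf16-to-bf16 route added by this patch is instead selected with
// DataType::BFLOAT16 plus CpuMatMulSettings().fast_math(true).fixed_format(true),
// and requires the RHS weights to be pre-reordered into the fixed format
// (see MatMulFixedFormatFixture::prepare_weights()/rearrange_data() in the fixture diff).
#include "arm_compute/core/Types.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // 2x2 matrices, mirroring the TinyMatMulDataset shapes used by RunTinyFixedFormat.
    Tensor lhs, rhs, dst;
    lhs.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::F32));
    rhs.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::F32));

    MatMulInfo mm_info;
    mm_info.adj_lhs(false).adj_rhs(false); // no transposes

    // fast_math(true) allows bf16 kernels to be picked on CPUs where
    // CPUInfo::get().has_bf16() is true; otherwise a plain FP32 kernel runs.
    const CpuMatMulSettings settings = CpuMatMulSettings().fast_math(true);

    NEMatMul matmul;
    matmul.configure(&lhs, &rhs, &dst, mm_info, settings, ActivationLayerInfo());

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill lhs/rhs ...

    matmul.run();
    return 0;
}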
Diffstat (limited to 'tests')
-rw-r--r--  tests/SConscript                                  |   5
-rw-r--r--  tests/validation/Helpers.h                        |  45
-rw-r--r--  tests/validation/NEON/MatMul.cpp                  | 402
-rw-r--r--  tests/validation/fixtures/MatMulFixture.h         | 383
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp    |  27
-rw-r--r--  tests/validation/reference/ActivationLayer.h      |  23
-rw-r--r--  tests/validation/reference/DepthConvertLayer.cpp  |   4
-rw-r--r--  tests/validation/reference/GEMM.cpp               |  79
-rw-r--r--  tests/validation/reference/Permute.cpp            |  18
-rw-r--r--  tests/validation/reference/ReshapeLayer.cpp       |  15
10 files changed, 706 insertions, 295 deletions
diff --git a/tests/SConscript b/tests/SConscript
index 305f1693d1..0907c5713b 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2023 Arm Limited.
+# Copyright (c) 2017-2023,2024 Arm Limited.
#
# SPDX-License-Identifier: MIT
#
@@ -81,6 +81,9 @@ if 'macos' in test_env['os']:
load_whole_archive = '-Wl,-force_load'
noload_whole_archive = ''
+if (env['multi_isa']):
+ test_env.Append(CPPDEFINES=['ARM_COMPUTE_ENABLE_BF16'])
+
if env['os'] in ['android', 'macos', 'bare_metal'] or env['standalone']:
Import("arm_compute_a")
Import("arm_compute_graph_a")
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 647adcdb69..e044620556 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2023,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,6 +27,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include "support/Half.h"
#include "tests/Globals.h"
#include "tests/SimpleTensor.h"
@@ -52,6 +53,10 @@ template <>
struct is_floating_point<half> : public std::true_type
{
};
+template <>
+struct is_floating_point<bfloat16> : public std::true_type
+{
+};
/** Helper struct to store the hints for
* - destination quantization info
@@ -78,13 +83,13 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
{
std::pair<T, T> bounds;
- switch(data_type)
+ switch (data_type)
{
case DataType::F16:
{
using namespace half_float::literal;
- switch(activation)
+ switch (activation)
{
case ActivationLayerInfo::ActivationFunction::TANH:
case ActivationLayerInfo::ActivationFunction::SQUARE:
@@ -104,7 +109,7 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
break;
}
case DataType::F32:
- switch(activation)
+ switch (activation)
{
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
// Reduce range as exponent overflows
@@ -227,7 +232,8 @@ std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &
* @param[in] max Floating point maximum value to be quantized
* @param[in] channel_id Channel id for per channel quantization info.
*/
-std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
+std::pair<int, int>
+get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors.
* This is used in our validation suite in order to simulate implicit padding addition after configuring, but before allocating.
@@ -238,7 +244,9 @@ std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo
*
* @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
*/
-void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);
+void add_padding_x(std::initializer_list<ITensor *> tensors,
+ const DataLayout &data_layout = DataLayout::NHWC,
+ bool only_right_pad = false);
/** For 2d convolution, given the Lhs/Rhs matrix quantization informations and the convolution dimension,
* calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
@@ -255,11 +263,11 @@ void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &d
*/
QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_info,
const QuantizationInfo &weight_q_info,
- int32_t height,
- int32_t width,
- int32_t channels,
- DataType data_type,
- float bias_fraction);
+ int32_t height,
+ int32_t width,
+ int32_t channels,
+ DataType data_type,
+ float bias_fraction);
/** For a matrix multiplication, given the Lhs/Rhs matrix quantization informations and the matrix multiplication dimensions,
* calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
@@ -275,8 +283,12 @@ QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_i
* @return QuantizationHint object containing the suggested output quantization info and min/max bias range
*/
QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
- const QuantizationInfo &rhs_q_info, int32_t m, int32_t n, int32_t k, DataType data_type,
- float bias_fraction);
+ const QuantizationInfo &rhs_q_info,
+ int32_t m,
+ int32_t n,
+ int32_t k,
+ DataType data_type,
+ float bias_fraction);
/** For a multiply-accumulate (mac), given the Lhs/Rhs vector quantization informations and the dot product dimensions,
* calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
@@ -291,8 +303,11 @@ QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_
* @return QuantizationHint object containing the suggested output quantization info and min/max bias range
*/
QuantizationHint suggest_mac_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
- const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type, float bias_fraction,
- int num_sd = 2);
+ const QuantizationInfo &rhs_q_info,
+ int32_t k,
+ DataType data_type,
+ float bias_fraction,
+ int num_sd = 2);
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/NEON/MatMul.cpp b/tests/validation/NEON/MatMul.cpp
index f91dea1b4f..02f0bfda1e 100644
--- a/tests/validation/NEON/MatMul.cpp
+++ b/tests/validation/NEON/MatMul.cpp
@@ -24,15 +24,14 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
-#include "tests/NEON/Accessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-
#include "tests/datasets/LargeMatMulDataset.h"
#include "tests/datasets/SmallMatMulDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/NEON/Accessor.h"
#include "tests/validation/fixtures/MatMulFixture.h"
+#include "tests/validation/Validation.h"
namespace arm_compute
{
@@ -45,8 +44,9 @@ using framework::dataset::make;
TEST_SUITE(NEON)
TEST_SUITE(MatMul)
-constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
-const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
+constexpr AbsoluteTolerance<float> tolerance_fp32(
+ 0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
+const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
#ifdef __aarch64__
constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8(1);
constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1);
@@ -120,55 +120,79 @@ template <typename T>
using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
template <typename T>
-using NEMatMulDynamicTensorsFixture = MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+using NEMatMulFixedFormatFixture = MatMulFixedFormatFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+
+template <typename T>
+using NEMatMulDynamicTensorsFixture =
+ MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
template <typename T>
using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEMatMulFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::LargeMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEMatMulFixture<float>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::HighDimensionalMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunHighDimensions,
+ NEMatMulFixture<float>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::HighDimensionalMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
}
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
+ NEMatMulDynamicTensorsFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfRuns", 5)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
@@ -179,37 +203,58 @@ TEST_SUITE_END() // FP32
/* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
constexpr AbsoluteTolerance<float> tolerance_bf16(0.02f);
TEST_SUITE(BF16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo() }),
- make("RunTimes", { 0 }),
- make("Settings", { CpuMatMulSettings().fast_math(true) }),
- make("LhsQInfo", { QuantizationInfo() }),
- make("RhsQInfo", { QuantizationInfo() }),
- make("OutQInfo", { QuantizationInfo() }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEMatMulFastMathFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo", {ActivationLayerInfo()}),
+ make("RunTimes", {0}),
+ make("Settings", {CpuMatMulSettings().fast_math(true)}),
+ make("LhsQInfo", {QuantizationInfo()}),
+ make("RhsQInfo", {QuantizationInfo()}),
+ make("OutQInfo", {QuantizationInfo()})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_bf16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFastMathFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::LargeMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F32),
- make("ActivationInfo", { ActivationLayerInfo() }),
- make("RunTimes", { 0 }),
- make("Settings", { CpuMatMulSettings().fast_math(true) }),
- make("LhsQInfo", { QuantizationInfo() }),
- make("RhsQInfo", { QuantizationInfo() }),
- make("OutQInfo", { QuantizationInfo() }))
-)
+FIXTURE_DATA_TEST_CASE(RunTinyFixedFormat,
+ NEMatMulFixedFormatFixture<bfloat16>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::TinyMatMulDataset(),
+ make("TransposeA", {false}),
+ make("TransposeB", {false}),
+ make("DataType", DataType::BFLOAT16),
+ make("ActivationInfo", {ActivationLayerInfo()}),
+ make("RunTimes", {0}),
+ make("Settings", {CpuMatMulSettings().fast_math(true).fixed_format(true)}),
+ make("LhsQInfo", {QuantizationInfo()}),
+ make("RhsQInfo", {QuantizationInfo()}),
+ make("OutQInfo", {QuantizationInfo()})))
+{
+ if (CPUInfo::get().has_bf16())
+ {
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_bf16);
+ }
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEMatMulFastMathFixture<float>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F32),
+ make("ActivationInfo", {ActivationLayerInfo()}),
+ make("RunTimes", {0}),
+ make("Settings", {CpuMatMulSettings().fast_math(true)}),
+ make("LhsQInfo", {QuantizationInfo()}),
+ make("RhsQInfo", {QuantizationInfo()}),
+ make("OutQInfo", {QuantizationInfo()})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_bf16, 0.01 /* tolerance_num */);
@@ -219,36 +264,51 @@ TEST_SUITE_END() // BF16
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F16),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEMatMulFixture<half>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F16),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::LargeMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F16),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEMatMulFixture<half>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F16),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp16);
}
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::F16),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
+ NEMatMulDynamicTensorsFixture<half>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::F16),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfRuns", 5)))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_fp16);
@@ -263,52 +323,64 @@ TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEQuantizedMatMulFixture<uint8_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::SmallerMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8),
- make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
+ NEQuantizedMatMulFixture<uint8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::SmallerMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::LargeMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEQuantizedMatMulFixture<uint8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 100, 1)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 200, -1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 2)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -318,52 +390,64 @@ TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
- combine(
- datasets::SmallMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8_SIGNED),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ NEQuantizedMatMulFixture<int8_t>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(datasets::SmallMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
-FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::SmallerMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8_SIGNED),
- make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
+ NEQuantizedMatMulFixture<int8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::SmallerMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- datasets::LargeMatMulDataset(),
- make("TransposeA", { false, true }),
- make("TransposeB", { false, true }),
- make("DataType", DataType::QASYMM8_SIGNED),
- make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
- make("NumberOfExtraRuns", { 0, 1 }),
- make("LhsQInfo", { QuantizationInfo(1.f / 150, -2) }),
- make("RhsQInfo", { QuantizationInfo(1.f / 250, 1) }),
- make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ NEQuantizedMatMulFixture<int8_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(datasets::LargeMatMulDataset(),
+ make("TransposeA", {false, true}),
+ make("TransposeB", {false, true}),
+ make("DataType", DataType::QASYMM8_SIGNED),
+ make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+}),
+make("NumberOfExtraRuns", {0, 1}),
+make("LhsQInfo", {QuantizationInfo(1.f / 150, -2)}),
+make("RhsQInfo", {QuantizationInfo(1.f / 250, 1)}),
+make("OutQInfo", {QuantizationInfo(1.f, 1)})))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
@@ -372,7 +456,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<int8_t>, framework::Da
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
-#endif // __aarch64__
+#endif // __aarch64__
TEST_SUITE_END() // MatMul
TEST_SUITE_END() // NEON
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
index 2e79612a37..ffd12e56d0 100644
--- a/tests/validation/fixtures/MatMulFixture.h
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,15 +27,17 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
#include "src/core/utils/quantization/AsymmHelpers.h"
#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
#include "tests/framework/Fixture.h"
-#include "tests/validation/Validation.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/GEMMLowp.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/ReshapeLayer.h"
+#include "tests/validation/Validation.h"
+
#include <limits>
#include <random>
#include <type_traits>
@@ -50,32 +52,50 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class MatMulGenericValidationFixture : public framework::Fixture
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
- Settings settings, QuantizationInfo a_qinfo = QuantizationInfo(), QuantizationInfo b_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo())
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ Settings settings,
+ QuantizationInfo a_qinfo = QuantizationInfo(),
+ QuantizationInfo b_qinfo = QuantizationInfo(),
+ QuantizationInfo o_qinfo = QuantizationInfo())
{
// For brevity, the input shapes are assumed to be not-transposed for both a and b matrices.
- if(transpose_a)
+ if (transpose_a)
{
permute(shape_a, PermutationVector(1U, 0U));
}
- if(transpose_b)
+ if (transpose_b)
{
permute(shape_b, PermutationVector(1U, 0U));
}
- _target = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings, a_qinfo, b_qinfo, o_qinfo);
- _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, a_qinfo, b_qinfo, o_qinfo);
+ _target = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info,
+ num_extra_runs, settings, a_qinfo, b_qinfo, o_qinfo);
+ _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info,
+ a_qinfo, b_qinfo, o_qinfo);
}
protected:
template <typename U>
void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
{
- switch(tensor.data_type())
+ switch (tensor.data_type())
{
+ case DataType::BFLOAT16:
+ {
+ arm_compute::utils::uniform_real_distribution_16bit<bfloat16> distribution{float(lo), float(hi)};
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::F16:
{
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) };
+ arm_compute::utils::uniform_real_distribution_16bit<half> distribution{float(lo), float(hi)};
library->fill(tensor, distribution, i);
break;
}
@@ -98,8 +118,18 @@ protected:
}
}
- TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
- ActivationLayerInfo act_info, int num_extra_runs, const Settings &settings, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ virtual TensorType compute_target(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ const Settings &settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
{
// 1. Create Classes and configure function
// ----------------------------------------------------
@@ -137,7 +167,7 @@ protected:
ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
// For multiple runs.
- for(int i = 0; i < num_extra_runs; i++)
+ for (int i = 0; i < num_extra_runs; i++)
{
// Stress dynamic tensors by running multiple times.
// --------------------------------------------------------
@@ -164,7 +194,12 @@ protected:
template <typename TT>
typename std::enable_if < !std::is_integral<TT>::value, SimpleTensor<TT >>::type
- compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
+ compute_reference_gemm(const SimpleTensor<TT> &a,
+ const SimpleTensor<TT> &b,
+ const SimpleTensor<TT> &c,
+ float alpha,
+ float beta,
+ const QuantizationInfo &o_qinfo)
{
ARM_COMPUTE_UNUSED(o_qinfo);
@@ -173,7 +208,12 @@ protected:
template <typename TT>
typename std::enable_if<std::is_integral<TT>::value, SimpleTensor<TT>>::type
- compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
+ compute_reference_gemm(const SimpleTensor<TT> &a,
+ const SimpleTensor<TT> &b,
+ const SimpleTensor<TT> &c,
+ float alpha,
+ float beta,
+ const QuantizationInfo &o_qinfo)
{
ARM_COMPUTE_UNUSED(alpha, beta);
@@ -186,23 +226,30 @@ protected:
int32_t output_multiplier = 0;
int32_t output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- std::vector<int32_t> output_multipliers{ output_multiplier };
- std::vector<int32_t> output_shifts{ output_shift };
+ std::vector<int32_t> output_multipliers{output_multiplier};
+ std::vector<int32_t> output_shifts{output_shift};
//The lhs and rhs offsets are negated here to keep the reference aligned with the function implementation where the lhs and rhs offsets are also negated.
- const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(
- a, b, c.shape(), -aq.offset, -bq.offset);
+ const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(a, b, c.shape(), -aq.offset, -bq.offset);
auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TT>(
- tmp, output_multipliers, output_shifts, oq.offset,
- std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ tmp, output_multipliers, output_shifts, oq.offset, std::numeric_limits<int32_t>::lowest(),
+ std::numeric_limits<int32_t>::max());
output.quantization_info(o_qinfo);
return output;
}
- SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
- ActivationLayerInfo act_info, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ SimpleTensor<T> compute_reference(const TensorShape &a_shape,
+ const TensorShape &b_shape,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
{
// We collapse dimensions > 2 onto dimension 2, i.e. 4D+ tensors will look like 3D
// This is necessary unless we choose to extend gemm reference for 4D+ tensors
@@ -211,9 +258,9 @@ protected:
TensorShape b_shape_collapsed = b_shape.collapsed_from(Window::DimZ);
// Create reference
- SimpleTensor<T> a{ a_shape_collapsed, data_type, 1, a_qinfo };
- SimpleTensor<T> b{ b_shape_collapsed, data_type, 1, b_qinfo };
- SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 };
+ SimpleTensor<T> a{a_shape_collapsed, data_type, 1, a_qinfo};
+ SimpleTensor<T> b{b_shape_collapsed, data_type, 1, b_qinfo};
+ SimpleTensor<T> c{output_shape_collapsed, data_type, 1};
// Fill reference
fill(a, 2);
@@ -234,16 +281,16 @@ protected:
b_transposed_shape.set(1, b.shape().x());
// Define transposed tensors
- SimpleTensor<T> a_transposed{ a_transposed_shape, data_type };
- SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
+ SimpleTensor<T> a_transposed{a_transposed_shape, data_type};
+ SimpleTensor<T> b_transposed{b_transposed_shape, data_type};
// pretranspose a if necessary
- if(transpose_a)
+ if (transpose_a)
{
a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
}
// pretranspose b if necessary
- if(transpose_b)
+ if (transpose_b)
{
b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
}
@@ -251,12 +298,13 @@ protected:
// Setting beta to 0 will effectively disable C for the
// computation of the reference: alpha * A * B + 0 * C
// Use transposed tensors if boolean enabled else use original tensors
- auto result = compute_reference_gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, 1.0f, 0.f, o_qinfo);
+ auto result = compute_reference_gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c,
+ 1.0f, 0.f, o_qinfo);
result = reference::activation_layer<T>(result, act_info, o_qinfo);
// We reshape the gemm output back if the tensor is high dimensional
- if(output_shape_collapsed != output_shape)
+ if (output_shape_collapsed != output_shape)
{
result = reference::reshape_layer(result, output_shape);
}
@@ -268,72 +316,293 @@ protected:
SimpleTensor<T> _reference{};
};
+/// TODO: (ONCPUML-1451) The current state of this fixture is interim and a longer-term testing method will be implemented later.
+/// @note: Currently we support only a 2x2 test due to the lack of reorder ref. implementation.
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulFixedFormatFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ TensorType compute_target(const TensorShape &shape_a,
+ const TensorShape &shape_b,
+ const TensorShape &output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ const Settings &settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo) override
+ {
+ // 1. Create Classes and configure function
+ // ----------------------------------------------------
+ // Create tensors
+ // Configure relevant classes and matmul function
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo);
+ TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo);
+
+ const auto weight_tensor_info = TensorInfo(*b.info());
+ const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info);
+ TensorType weights_transformed = create_tensor<TensorType>(new_tensor_info);
+
+ // Configure MatMulInfo class
+ MatMulInfo mm_info;
+ mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
+
+ // Ensure values are dynamic
+ a.info()->set_are_values_constant(false);
+ b.info()->set_are_values_constant(false);
+ weights_transformed.info()->set_are_values_constant(false);
+
+ FunctionType matmul;
+
+ // Configure operator
+ matmul.configure(&a, &weights_transformed, &dst, mm_info, settings, act_info);
+
+ // Assertions
+ ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(weights_transformed.info()->is_resizable());
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ dst.allocator()->allocate();
+ weights_transformed.allocator()->allocate();
+
+ ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+ ARM_COMPUTE_ASSERT(!weights_transformed.info()->is_resizable());
+
+ // For multiple runs.
+ for (int i = 0; i < num_extra_runs; i++)
+ {
+ // Stress dynamic tensors by running multiple times.
+ // --------------------------------------------------------
+ // Fill tensors with new seed
+ // Run function
+ const int seed_offset = num_extra_runs * 100;
+ this->fill(AccessorType(a), seed_offset);
+ this->fill(AccessorType(b), seed_offset + 1);
+
+ matmul.run();
+ }
+
+ // 2. Final Run for reference comparison
+ // --------------------------------------------------------
+ // Re-fill tensors same seed as reference run
+ // Compute MatMul operation
+ this->fill(AccessorType(a), 2);
+ this->fill(AccessorType(b), 3);
+
+ rearrange_data(AccessorType(b), AccessorType(weights_transformed));
+
+ matmul.run();
+
+ return dst;
+ }
+
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ Settings settings,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
+ {
+ if (CPUInfo::get().has_bf16())
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings,
+ a_qinfo, b_qinfo, o_qinfo);
+ }
+ }
+
+private:
+ TensorInfo prepare_weights(const TensorInfo tensor_info)
+ {
+ const DataLayout data_layout = tensor_info.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS);
+ const DataType data_type = tensor_info.data_type();
+ const TensorShape tensor_shape = tensor_info.tensor_shape();
+ const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS);
+
+ arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes();
+ strides_in_bytes.set(1, 32);
+ strides_in_bytes.set(2, 32);
+
+ const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes();
+ const size_t total_size_in_bytes = 32;
+
+ const TensorShape TS(H, W);
+
+ TensorInfo new_tensor_info = tensor_info;
+ new_tensor_info.init(TS, tensor_info.num_channels(), data_type, strides_in_bytes, offset_first_element_in_bytes,
+ total_size_in_bytes);
+
+ return new_tensor_info;
+ }
+
+ void rearrange_data(const AccessorType src, AccessorType dst)
+ {
+ const TensorShape src_tensor_shape = src.shape();
+ const DataLayout data_layout = src.data_layout();
+ ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS);
+ const unsigned int O =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O
+ const unsigned int H =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)];
+ const unsigned int W =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)];
+ const unsigned int I =
+ src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I
+ ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(I == 1 && O == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS);
+
+ const T *src_ptr = reinterpret_cast<const T *>(src.data());
+ T *dst_ptr = reinterpret_cast<T *>(dst.data());
+
+ // rearrange indexes for 2x2 input and weight
+ int dst_idx[] = {0, 4, 1, 5};
+ for (int i = 0; i < 4; i++)
+ {
+ dst_ptr[dst_idx[i]] = src_ptr[i];
+ }
+ }
+};
+
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0,
- Settings());
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0, Settings());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationWithDynamicTensorsFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class QuantizedMatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class QuantizedMatMulValidationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
- QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info,
+ int num_extra_runs,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
- a_qinfo, b_qinfo, o_qinfo);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationWithActivationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo act_info)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationWithActivationAlphaBetaFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationWithActivationAlphaBetaFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
- float alpha_beta)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta)
{
ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class QuantizedMatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class QuantizedMatMulValidationWithActivationFixture
+ : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
- float alpha_beta, int num_extra_runs,
- QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ void setup(TensorShape shape_a,
+ TensorShape shape_b,
+ TensorShape output_shape,
+ bool transpose_a,
+ bool transpose_b,
+ DataType data_type,
+ ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta,
+ int num_extra_runs,
+ QuantizationInfo a_qinfo,
+ QuantizationInfo b_qinfo,
+ QuantizationInfo o_qinfo)
{
ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
- a_qinfo, b_qinfo, o_qinfo);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+ shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
}
};
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 664b969125..2172362bdd 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "ActivationLayer.h"
#include "arm_compute/core/Types.h"
+
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -40,7 +41,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
ARM_COMPUTE_UNUSED(oq_info);
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
+ SimpleTensor<T> dst{src.shape(), src.data_type(), 1};
// Compute reference
const T a(info.a());
@@ -48,7 +49,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
#if defined(_OPENMP)
#pragma omp parallel for
#endif /* _OPENMP */
- for(int i = 0; i < src.num_elements(); ++i)
+ for (int i = 0; i < src.num_elements(); ++i)
{
dst[i] = activate_float<T>(src[i], a, b, info.activation());
}
@@ -57,7 +58,8 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
}
template <>
-SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<uint8_t>
+activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -68,7 +70,8 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
}
template <>
-SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int8_t>
+activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -79,7 +82,8 @@ SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, A
}
template <>
-SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int16_t>
+activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -88,9 +92,14 @@ SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src
SimpleTensor<int16_t> dst = convert_to_symmetric<int16_t>(dst_tmp, dst_qinfo);
return dst;
}
-template SimpleTensor<int32_t> activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<int32_t>
+activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<float>
+activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<half>
+activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<bfloat16>
+activation_layer(const SimpleTensor<bfloat16> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index a813ba5037..7f896bd696 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022 Arm Limited.
+ * Copyright (c) 2017-2020,2022,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
-#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
+#ifndef ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
+#define ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
#include "tests/SimpleTensor.h"
#include "tests/validation/Helpers.h"
@@ -40,7 +40,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
{
T ret;
- switch(activation)
+ switch (activation)
{
case ActivationLayerInfo::ActivationFunction::ABS:
ret = std::abs(x);
@@ -61,13 +61,13 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = std::min<T>(a, std::max<T>(b, x));
break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- ret = (x > 0) ? x : a * x;
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * x);
break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
ret = std::log(static_cast<T>(1) + std::exp(static_cast<double>(x)));
break;
case ActivationLayerInfo::ActivationFunction::ELU:
- ret = (x > 0) ? x : a * (std::exp(x) - static_cast<T>(1));
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1)));
break;
case ActivationLayerInfo::ActivationFunction::SQRT:
ret = std::sqrt(x);
@@ -82,10 +82,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = x;
break;
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
- ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
+ ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) *
+ 0.166666667f);
break;
case ActivationLayerInfo::ActivationFunction::SWISH:
- ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a*x));
+ ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a * x));
break;
case ActivationLayerInfo::ActivationFunction::GELU:
ret = x * 0.5f * (1 + erf(x / std::sqrt(2.0f)));
@@ -99,9 +100,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
}
template <typename T>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo());
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src,
+ ActivationLayerInfo info,
+ const QuantizationInfo &oq_info = QuantizationInfo());
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_H */
+#endif // ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
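
Editor's note on the casts added to LEAKY_RELU and ELU above: once activate_float can be instantiated with T = bfloat16, an expression such as a * x does not necessarily stay in T, because a storage-only 16-bit type typically promotes to float in arithmetic, so the two arms of the conditional would otherwise disagree in type. The sketch below reproduces that situation with a hypothetical Bf16 wrapper and a hypothetical leaky_relu helper; it is illustrative only and is not arm_compute's bfloat16 class.

#include <cstdint>
#include <cstring>
#include <iostream>

// Hypothetical storage-only bfloat16: converts to float for arithmetic (illustration only).
struct Bf16
{
    uint16_t bits{0};
    Bf16() = default;
    explicit Bf16(float f)
    {
        uint32_t u;
        std::memcpy(&u, &f, sizeof(u));
        bits = static_cast<uint16_t>(u >> 16); // plain truncation, for illustration only
    }
    operator float() const
    {
        const uint32_t u = static_cast<uint32_t>(bits) << 16;
        float          f;
        std::memcpy(&f, &u, sizeof(f));
        return f;
    }
};

template <typename T>
T leaky_relu(T x, T a)
{
    // a * x promotes to float when T is Bf16, so it must be cast back to T
    // before it can share a conditional expression with the untouched branch.
    return x > static_cast<T>(0) ? x : static_cast<T>(a * x);
}

int main()
{
    const Bf16 x(-2.0f), a(0.1f);
    std::cout << static_cast<float>(leaky_relu(x, a)) << '\n'; // about -0.2
}
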
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index 1e4939129e..3f88897f8e 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -165,7 +165,7 @@ template SimpleTensor<half> depth_convert(const SimpleTensor<int32_t> &src, Data
template SimpleTensor<float> depth_convert(const SimpleTensor<int32_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
// BFLOAT16
-template SimpleTensor<float> depth_convert(const SimpleTensor<bfloat16> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
+template SimpleTensor<bfloat16> depth_convert(const SimpleTensor<bfloat16> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
// F16
template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<half> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
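
Editor's note on the changed bfloat16 instantiation above: with the matmul path operating bf16-in/bf16-out, the reference depth_convert is now instantiated with a bfloat16 destination instead of widening to float. For readers unfamiliar with the format, the sketch below shows a plain float <-> bfloat16 round trip with round-to-nearest-even; f32_to_bf16 and bf16_to_f32 are hypothetical helpers, and the library's own conversion kernels may round differently, so treat this as an illustration of the data type rather than of Acl's implementation.

#include <cstdint>
#include <cstring>
#include <iostream>

// bfloat16 is the top 16 bits of an IEEE-754 binary32 (8 exponent bits, 7 mantissa bits).
static uint16_t f32_to_bf16(float f)
{
    uint32_t u;
    std::memcpy(&u, &f, sizeof(u));
    const uint32_t rounding = 0x7FFFu + ((u >> 16) & 1u); // round to nearest, ties to even
    return static_cast<uint16_t>((u + rounding) >> 16);
}

static float bf16_to_f32(uint16_t b)
{
    const uint32_t u = static_cast<uint32_t>(b) << 16;
    float          f;
    std::memcpy(&f, &u, sizeof(f));
    return f;
}

int main()
{
    const float x = 3.14159f;
    std::cout << bf16_to_f32(f32_to_bf16(x)) << '\n'; // 3.140625: only 7 mantissa bits survive
}
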
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f7e97e47b8..20f1139a02 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,10 +35,11 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T>
+gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+ SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
// Compute reference
const int M = a.shape().y();
@@ -50,15 +51,22 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
const int a_stride_z = K * M;
const int a_stride_w = K * M * D;
- const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
- int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+ const int b_stride_z =
+ b.shape().num_dimensions() > 2
+ ? N * K
+ : 0; // Do not slide the matrix B along the 3rd dimension in case matrix B has fewer than 3 dimensions
+ int b_stride_w =
+ b.shape().num_dimensions() > 3
+ ? K * N * D
+ : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has fewer than 4 dimensions
// Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
// it can be either the number of batches or the number of multis. Batched-GEMM computation is detected only when the third dimension of the "a" and "c" tensors is 1 and the number of dimensions is 4
- const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+ const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+ c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
// Batched-GEMM
- if(is_batched_gemm)
+ if (is_batched_gemm)
{
b_stride_w = b_stride_z;
}
@@ -69,21 +77,21 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
#pragma omp parallel for collapse(2)
#endif /* _OPENMP */
- for(int w = 0; w < W; ++w)
+ for (int w = 0; w < W; ++w)
{
- for(int depth = 0; depth < D; ++depth)
+ for (int depth = 0; depth < D; ++depth)
{
const int base_addr_a = depth * a_stride_z + w * a_stride_w;
const int base_addr_b = depth * b_stride_z + w * b_stride_w;
const int base_addr_c = depth * c_stride_z + w * c_stride_w;
- for(int row = 0; row < M; ++row)
+ for (int row = 0; row < M; ++row)
{
- for(int col = 0; col < N; ++col)
+ for (int col = 0; col < N; ++col)
{
T acc(0);
- for(int k = 0; k < K; ++k)
+ for (int k = 0; k < K; ++k)
{
acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
}
@@ -99,11 +107,12 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
}
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T> gemm_mixed_precision(
+ const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// GEMM mixed-precision combines F32 accumulators with F16 multiplications
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+ SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
// Compute reference
const int M = a.shape().y();
@@ -115,15 +124,22 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
const int a_stride_z = K * M;
const int a_stride_w = K * M * D;
- const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
- int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+ const int b_stride_z =
+ b.shape().num_dimensions() > 2
+ ? N * K
+ : 0; // Do not slide the matrix B along the 3rd dimension in case matrix B has fewer than 3 dimensions
+ int b_stride_w =
+ b.shape().num_dimensions() > 3
+ ? K * N * D
+ : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has fewer than 4 dimensions
// Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
// it can be either the number of batches or the number of multis. Batched-GEMM computation is detected only when the third dimension of the "a" and "c" tensors is 1 and the number of dimensions is 4
- const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+ const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+ c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
// Batched-GEMM
- if(is_batched_gemm)
+ if (is_batched_gemm)
{
b_stride_w = b_stride_z;
}
@@ -134,27 +150,28 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
#pragma omp parallel for collapse(2)
#endif /* _OPENMP */
- for(int w = 0; w < W; ++w)
+ for (int w = 0; w < W; ++w)
{
- for(int depth = 0; depth < D; ++depth)
+ for (int depth = 0; depth < D; ++depth)
{
const int base_addr_a = depth * a_stride_z + w * a_stride_w;
const int base_addr_b = depth * b_stride_z + w * b_stride_w;
const int base_addr_c = depth * c_stride_z + w * c_stride_w;
- for(int row = 0; row < M; ++row)
+ for (int row = 0; row < M; ++row)
{
- for(int col = 0; col < N; ++col)
+ for (int col = 0; col < N; ++col)
{
float acc(0);
- for(int k = 0; k < K; ++k)
+ for (int k = 0; k < K; ++k)
{
acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
}
// Finalize the result: alpha * A * B + beta * C
- dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
+ dst[base_addr_c + col + row * N] =
+ static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
}
}
}
@@ -163,9 +180,17 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
return dst;
}
-template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
-template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<float>
+gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
+template SimpleTensor<bfloat16> gemm(const SimpleTensor<bfloat16> &a,
+ const SimpleTensor<bfloat16> &b,
+ const SimpleTensor<bfloat16> &c,
+ float alpha,
+ float beta);
+template SimpleTensor<half>
+gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<half> gemm_mixed_precision(
+ const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
} // namespace reference
} // namespace validation
} // namespace test
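
Editor's note: the GEMM hunks above mostly reflow the reference loops, but the computation is worth spelling out since the new bfloat16 instantiation reuses it unchanged. The sketch below (gemm_ref is an illustrative name, not a library symbol) computes a single 2D slice of dst = alpha * A(MxK) * B(KxN) + beta * C(MxN); the real helper additionally walks the z/w dimensions using the b_stride_z / b_stride_w logic shown in the diff, and gemm_mixed_precision differs only in accumulating in float while storing in T.

#include <iostream>
#include <vector>

static std::vector<float> gemm_ref(const std::vector<float> &a,
                                   const std::vector<float> &b,
                                   const std::vector<float> &c,
                                   int M, int N, int K,
                                   float alpha, float beta)
{
    std::vector<float> dst(static_cast<size_t>(M) * N);
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            float acc = 0.f;
            for (int k = 0; k < K; ++k)
            {
                acc += a[row * K + k] * b[k * N + col]; // inner product of row of A and column of B
            }
            // Finalize the result: alpha * A * B + beta * C
            dst[row * N + col] = alpha * acc + beta * c[row * N + col];
        }
    }
    return dst;
}

int main()
{
    // 2x2 identity times a 2x2 matrix, alpha = 1, beta = 0.
    const std::vector<float> a{1, 0, 0, 1}, b{1, 2, 3, 4}, c{0, 0, 0, 0};
    for (float v : gemm_ref(a, b, c, 2, 2, 2, 1.f, 0.f))
        std::cout << v << ' '; // 1 2 3 4
    std::cout << '\n';
}
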
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index 6f122b1bf5..7aa3011d8f 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2019,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "Permute.h"
#include "arm_compute/core/Types.h"
+
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -42,11 +43,11 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
permute(dst_shape, perm);
// Create reference
- SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
+ SimpleTensor<T> dst{dst_shape, src.data_type(), src.num_channels(), src.quantization_info()};
// Compute reference
const uint32_t num_elements = src.num_elements();
- for(uint32_t i = 0; i < num_elements; ++i)
+ for (uint32_t i = 0; i < num_elements; ++i)
{
const Coordinates src_coords = index2coord(src.shape(), i);
Coordinates dst_coords = src_coords;
@@ -58,13 +59,14 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
return dst;
}
-template SimpleTensor<int8_t> permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
-template SimpleTensor<uint8_t> permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
-template SimpleTensor<int16_t> permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
+template SimpleTensor<int8_t> permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
+template SimpleTensor<uint8_t> permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
+template SimpleTensor<int16_t> permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
template SimpleTensor<uint16_t> permute(const SimpleTensor<uint16_t> &src, PermutationVector perm);
template SimpleTensor<uint32_t> permute(const SimpleTensor<uint32_t> &src, PermutationVector perm);
-template SimpleTensor<float> permute(const SimpleTensor<float> &src, PermutationVector perm);
-template SimpleTensor<half> permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<float> permute(const SimpleTensor<float> &src, PermutationVector perm);
+template SimpleTensor<half> permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<bfloat16> permute(const SimpleTensor<bfloat16> &src, PermutationVector perm);
} // namespace reference
} // namespace validation
} // namespace test
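
Editor's note: the reference permute visits every source element, converts its linear index to coordinates, permutes the coordinates, and writes to the matching destination index. The sketch below shows that idea on a flat buffer; permute_ref, index_to_coord and coord_to_index are stand-ins for the library's helpers, and the convention assumed here (dst_coord[perm[i]] = src_coord[i]) is an assumption made for the illustration, not a statement about PermutationVector's exact semantics.

#include <array>
#include <iostream>
#include <vector>

// Convert a linear index into per-dimension coordinates (innermost dimension first).
static std::array<int, 3> index_to_coord(int idx, const std::array<int, 3> &shape)
{
    std::array<int, 3> c{};
    for (int d = 0; d < 3; ++d)
    {
        c[d] = idx % shape[d];
        idx /= shape[d];
    }
    return c;
}

static int coord_to_index(const std::array<int, 3> &c, const std::array<int, 3> &shape)
{
    return c[0] + c[1] * shape[0] + c[2] * shape[0] * shape[1];
}

static std::vector<float>
permute_ref(const std::vector<float> &src, const std::array<int, 3> &shape, const std::array<int, 3> &perm)
{
    std::array<int, 3> dst_shape{};
    for (int d = 0; d < 3; ++d)
        dst_shape[perm[d]] = shape[d]; // permute the shape the same way as the coordinates

    std::vector<float> dst(src.size());
    for (int i = 0; i < static_cast<int>(src.size()); ++i)
    {
        const std::array<int, 3> sc = index_to_coord(i, shape);
        std::array<int, 3>       dc{};
        for (int d = 0; d < 3; ++d)
            dc[perm[d]] = sc[d];
        dst[coord_to_index(dc, dst_shape)] = src[i];
    }
    return dst;
}

int main()
{
    // A 2x3x1 tensor with its first two dimensions swapped becomes 3x2x1 (a transpose).
    const std::vector<float> src{0, 1, 2, 3, 4, 5};
    for (float v : permute_ref(src, {2, 3, 1}, {1, 0, 2}))
        std::cout << v << ' '; // 0 2 4 1 3 5
    std::cout << '\n';
}
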
diff --git a/tests/validation/reference/ReshapeLayer.cpp b/tests/validation/reference/ReshapeLayer.cpp
index daea001be6..30a58dd65b 100644
--- a/tests/validation/reference/ReshapeLayer.cpp
+++ b/tests/validation/reference/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,14 +44,15 @@ SimpleTensor<T> reshape_layer(const SimpleTensor<T> &src, const TensorShape &out
return dst;
}
-template SimpleTensor<uint8_t> reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int8_t> reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<uint8_t> reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int8_t> reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
template SimpleTensor<uint16_t> reshape_layer(const SimpleTensor<uint16_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int16_t> reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int16_t> reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
template SimpleTensor<uint32_t> reshape_layer(const SimpleTensor<uint32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int32_t> reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<half> reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
-template SimpleTensor<float> reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
+template SimpleTensor<int32_t> reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
+template SimpleTensor<half> reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
+template SimpleTensor<float> reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
+template SimpleTensor<bfloat16> reshape_layer(const SimpleTensor<bfloat16> &src, const TensorShape &output_shape);
/** [ReshapeLayer] **/
} // namespace reference
} // namespace validation