author    Renato Arantes <renato.arantes@arm.com>  2024-01-26 17:31:18 +0000
committer Renato Barros Arantes <renato.arantes@arm.com>  2024-03-21 11:15:30 +0000
commit    36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 (patch)
tree      0701d615ef30444b9d0789db691b59b81fd9e86e /tests/validation/reference
parent    d2191150736dde66d79eb97e0c8ee506eef3c8fc (diff)
[ONCPUML-1451] Add matmul kernel to enable bf16 to bf16 operations via PyTorch® autocast() function
The full range of tests must be added under the [MLINFSW-482] epic, due to the lack of reordering kernels implemented in Acl.

Co-Authored-By: David Mansell <David.Mansell@arm.com>
Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36
Signed-off-by: Renato Arantes <renato.arantes@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
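Since the patch routes bf16-to-bf16 operations through the reference paths below, a quick reminder of the format (background, not part of the patch): bfloat16 keeps the sign bit, the full 8-bit exponent, and the top 7 mantissa bits of an IEEE-754 float32, i.e. its upper 16 bits. A minimal truncating round-trip sketch in C++, with hypothetical helper names:

    #include <cstdint>
    #include <cstring>

    // float -> bf16 by truncation: keep the top 16 bits of the float32 pattern.
    // Production kernels typically round to nearest even instead of truncating.
    static uint16_t float_to_bf16(float f)
    {
        uint32_t bits;
        std::memcpy(&bits, &f, sizeof(bits));
        return static_cast<uint16_t>(bits >> 16);
    }

    // bf16 -> float is exact: widen the 16-bit pattern back into the high half.
    static float bf16_to_float(uint16_t h)
    {
        const uint32_t bits = static_cast<uint32_t>(h) << 16;
        float f;
        std::memcpy(&f, &bits, sizeof(f));
        return f;
    }

For example, bf16_to_float(float_to_bf16(1.0f)) is exactly 1.0f, while values that need more than 7 mantissa bits lose precision on the way through.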
Diffstat (limited to 'tests/validation/reference')
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp   | 27
-rw-r--r--  tests/validation/reference/ActivationLayer.h     | 23
-rw-r--r--  tests/validation/reference/DepthConvertLayer.cpp |  4
-rw-r--r--  tests/validation/reference/GEMM.cpp              | 79
-rw-r--r--  tests/validation/reference/Permute.cpp           | 18
-rw-r--r--  tests/validation/reference/ReshapeLayer.cpp      | 15
6 files changed, 103 insertions(+), 63 deletions(-)
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 664b969125..2172362bdd 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "ActivationLayer.h"
#include "arm_compute/core/Types.h"
+
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -40,7 +41,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
ARM_COMPUTE_UNUSED(oq_info);
// Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
+ SimpleTensor<T> dst{src.shape(), src.data_type(), 1};
// Compute reference
const T a(info.a());
@@ -48,7 +49,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
#if defined(_OPENMP)
#pragma omp parallel for
#endif /* _OPENMP */
- for(int i = 0; i < src.num_elements(); ++i)
+ for (int i = 0; i < src.num_elements(); ++i)
{
dst[i] = activate_float<T>(src[i], a, b, info.activation());
}
@@ -57,7 +58,8 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
}
template <>
-SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<uint8_t>
+activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -68,7 +70,8 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
}
template <>
-SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int8_t>
+activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -79,7 +82,8 @@ SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, A
}
template <>
-SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int16_t>
+activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -88,9 +92,14 @@ SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src
SimpleTensor<int16_t> dst = convert_to_symmetric<int16_t>(dst_tmp, dst_qinfo);
return dst;
}
-template SimpleTensor<int32_t> activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<int32_t>
+activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<float>
+activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<half>
+activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<bfloat16>
+activation_layer(const SimpleTensor<bfloat16> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
} // namespace reference
} // namespace validation
} // namespace test
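For orientation, the instantiations above all feed the same parallel elementwise loop. A standalone sketch of that pattern, assuming plain std::vector in place of SimpleTensor and specializing to float and ReLU for brevity:

    #include <algorithm>
    #include <vector>

    // Elementwise activation over a flat buffer, mirroring the omp-parallel
    // loop in activation_layer above.
    std::vector<float> relu_ref(const std::vector<float> &src)
    {
        std::vector<float> dst(src.size());
    #if defined(_OPENMP)
    #pragma omp parallel for
    #endif
        for (long i = 0; i < static_cast<long>(src.size()); ++i)
        {
            dst[i] = std::max(0.0f, src[i]);
        }
        return dst;
    }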
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index a813ba5037..7f896bd696 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022 Arm Limited.
+ * Copyright (c) 2017-2020,2022,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
-#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
+#ifndef ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
+#define ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
#include "tests/SimpleTensor.h"
#include "tests/validation/Helpers.h"
@@ -40,7 +40,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
{
T ret;
- switch(activation)
+ switch (activation)
{
case ActivationLayerInfo::ActivationFunction::ABS:
ret = std::abs(x);
@@ -61,13 +61,13 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = std::min<T>(a, std::max<T>(b, x));
break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- ret = (x > 0) ? x : a * x;
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * x);
break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
ret = std::log(static_cast<T>(1) + std::exp(static_cast<double>(x)));
break;
case ActivationLayerInfo::ActivationFunction::ELU:
- ret = (x > 0) ? x : a * (std::exp(x) - static_cast<T>(1));
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1)));
break;
case ActivationLayerInfo::ActivationFunction::SQRT:
ret = std::sqrt(x);
@@ -82,10 +82,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = x;
break;
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
- ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
+ ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) *
+ 0.166666667f);
break;
case ActivationLayerInfo::ActivationFunction::SWISH:
- ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a*x));
+ ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a * x));
break;
case ActivationLayerInfo::ActivationFunction::GELU:
ret = x * 0.5f * (1 + erf(x / std::sqrt(2.0f)));
@@ -99,9 +100,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
}
template <typename T>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo());
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src,
+ ActivationLayerInfo info,
+ const QuantizationInfo &oq_info = QuantizationInfo());
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_H */
+#endif // ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
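The extra static_casts introduced in the LEAKY_RELU and ELU branches above are what make the template usable with non-native element types: for a type like bfloat16, a mixed expression such as a * x generally promotes to float, so the result has to be narrowed back to T explicitly. A float-compatible sketch of the two rewritten branches, under that promotion assumption:

    #include <cmath>

    // LEAKY_RELU: a * x may be evaluated in float, so cast the product back to T.
    template <typename T>
    T leaky_relu(T x, T a)
    {
        return x > static_cast<T>(0) ? x : static_cast<T>(a * x);
    }

    // ELU: the same narrowing applies to the whole promoted subexpression.
    template <typename T>
    T elu(T x, T a)
    {
        return x > static_cast<T>(0) ? x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1)));
    }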
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index 1e4939129e..3f88897f8e 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -165,7 +165,7 @@ template SimpleTensor<half> depth_convert(const SimpleTensor<int32_t> &src, Data
template SimpleTensor<float> depth_convert(const SimpleTensor<int32_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
// BFLOAT16
-template SimpleTensor<float> depth_convert(const SimpleTensor<bfloat16> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
+template SimpleTensor<bfloat16> depth_convert(const SimpleTensor<bfloat16> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
// F16
template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<half> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
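The one-line DepthConvertLayer change above is a correctness fix rather than formatting: the explicit instantiation for a bfloat16 source previously named SimpleTensor<float> as its return type, while a bf16-to-bf16 convert must return SimpleTensor<bfloat16>. A reduced sketch of why the instantiation has to name the destination element type, using a hypothetical helper with std::vector in place of SimpleTensor:

    #include <cstddef>
    #include <vector>

    // The destination element type is a template parameter, so each explicit
    // instantiation fixes the return type at compile time; the policy/shift
    // handling of the real reference is omitted here.
    template <typename Tout, typename Tin>
    std::vector<Tout> depth_convert_like(const std::vector<Tin> &src)
    {
        std::vector<Tout> dst(src.size());
        for (std::size_t i = 0; i < src.size(); ++i)
        {
            dst[i] = static_cast<Tout>(src[i]);
        }
        return dst;
    }

    // Corrected pairing, by analogy with the patch:
    // template std::vector<bfloat16> depth_convert_like<bfloat16, bfloat16>(const std::vector<bfloat16> &);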
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f7e97e47b8..20f1139a02 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,10 +35,11 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T>
+gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+ SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
// Compute reference
const int M = a.shape().y();
@@ -50,15 +51,22 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
const int a_stride_z = K * M;
const int a_stride_w = K * M * D;
- const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
- int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+ const int b_stride_z =
+ b.shape().num_dimensions() > 2
+ ? N * K
+ : 0; // Do not slide the matrix B along the 3rd dimension in case matrix B has fewer than 3 dimensions
+ int b_stride_w =
+ b.shape().num_dimensions() > 3
+ ? K * N * D
+ : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has fewer than 4 dimensions
// Note: There are 3 gemm types: batched-gemm, multi-gemm, and batches of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
// it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
- const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+ const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+ c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
// Batched-GEMM
- if(is_batched_gemm)
+ if (is_batched_gemm)
{
b_stride_w = b_stride_z;
}
@@ -69,21 +77,21 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
#pragma omp parallel for collapse(2)
#endif /* _OPENMP */
- for(int w = 0; w < W; ++w)
+ for (int w = 0; w < W; ++w)
{
- for(int depth = 0; depth < D; ++depth)
+ for (int depth = 0; depth < D; ++depth)
{
const int base_addr_a = depth * a_stride_z + w * a_stride_w;
const int base_addr_b = depth * b_stride_z + w * b_stride_w;
const int base_addr_c = depth * c_stride_z + w * c_stride_w;
- for(int row = 0; row < M; ++row)
+ for (int row = 0; row < M; ++row)
{
- for(int col = 0; col < N; ++col)
+ for (int col = 0; col < N; ++col)
{
T acc(0);
- for(int k = 0; k < K; ++k)
+ for (int k = 0; k < K; ++k)
{
acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
}
@@ -99,11 +107,12 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
}
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T> gemm_mixed_precision(
+ const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
{
// GEMM mixed-precision combines F32 accumulators with F16 multiplications
// Create reference
- SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+ SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
// Compute reference
const int M = a.shape().y();
@@ -115,15 +124,22 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
const int a_stride_z = K * M;
const int a_stride_w = K * M * D;
- const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
- int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+ const int b_stride_z =
+ b.shape().num_dimensions() > 2
+ ? N * K
+ : 0; // Do not slide the matrix B along the 3rd dimension in case matrix B has fewer than 3 dimensions
+ int b_stride_w =
+ b.shape().num_dimensions() > 3
+ ? K * N * D
+ : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has fewer than 4 dimensions
// Note: There are 3 gemm types: batched-gemm, multi-gemm, and batches of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
// it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
- const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+ const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+ c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
// Batched-GEMM
- if(is_batched_gemm)
+ if (is_batched_gemm)
{
b_stride_w = b_stride_z;
}
@@ -134,27 +150,28 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
#pragma omp parallel for collapse(2)
#endif /* _OPENMP */
- for(int w = 0; w < W; ++w)
+ for (int w = 0; w < W; ++w)
{
- for(int depth = 0; depth < D; ++depth)
+ for (int depth = 0; depth < D; ++depth)
{
const int base_addr_a = depth * a_stride_z + w * a_stride_w;
const int base_addr_b = depth * b_stride_z + w * b_stride_w;
const int base_addr_c = depth * c_stride_z + w * c_stride_w;
- for(int row = 0; row < M; ++row)
+ for (int row = 0; row < M; ++row)
{
- for(int col = 0; col < N; ++col)
+ for (int col = 0; col < N; ++col)
{
float acc(0);
- for(int k = 0; k < K; ++k)
+ for (int k = 0; k < K; ++k)
{
acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
}
// Finalize the result: alpha * A * B + beta * C
- dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
+ dst[base_addr_c + col + row * N] =
+ static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
}
}
}
@@ -163,9 +180,17 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
return dst;
}
-template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
-template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<float>
+gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
+template SimpleTensor<bfloat16> gemm(const SimpleTensor<bfloat16> &a,
+ const SimpleTensor<bfloat16> &b,
+ const SimpleTensor<bfloat16> &c,
+ float alpha,
+ float beta);
+template SimpleTensor<half>
+gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<half> gemm_mixed_precision(
+ const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
} // namespace reference
} // namespace validation
} // namespace test
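Stripped of the stride and batching bookkeeping above, the computation both reference kernels perform is the textbook D = alpha * A * B + beta * C. A single-batch, row-major sketch, assuming A is MxK, B is KxN, and C is MxN:

    #include <cstddef>
    #include <vector>

    // Plain triple loop matching the innermost loops of the reference gemm
    // above; acc has the element type T, as in the non-mixed-precision variant.
    template <typename T>
    std::vector<T> gemm_ref(const std::vector<T> &a, const std::vector<T> &b,
                            const std::vector<T> &c, int M, int N, int K,
                            float alpha, float beta)
    {
        std::vector<T> dst(static_cast<std::size_t>(M) * N);
        for (int row = 0; row < M; ++row)
        {
            for (int col = 0; col < N; ++col)
            {
                T acc(0);
                for (int k = 0; k < K; ++k)
                {
                    acc += a[row * K + k] * b[k * N + col];
                }
                // gemm_mixed_precision differs only in accumulating acc as float.
                dst[row * N + col] = static_cast<T>(alpha * acc + beta * c[row * N + col]);
            }
        }
        return dst;
    }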
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index 6f122b1bf5..7aa3011d8f 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2019,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#include "Permute.h"
#include "arm_compute/core/Types.h"
+
#include "tests/validation/Helpers.h"
namespace arm_compute
@@ -42,11 +43,11 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
permute(dst_shape, perm);
// Create reference
- SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
+ SimpleTensor<T> dst{dst_shape, src.data_type(), src.num_channels(), src.quantization_info()};
// Compute reference
const uint32_t num_elements = src.num_elements();
- for(uint32_t i = 0; i < num_elements; ++i)
+ for (uint32_t i = 0; i < num_elements; ++i)
{
const Coordinates src_coords = index2coord(src.shape(), i);
Coordinates dst_coords = src_coords;
@@ -58,13 +59,14 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
return dst;
}
-template SimpleTensor<int8_t> permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
-template SimpleTensor<uint8_t> permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
-template SimpleTensor<int16_t> permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
+template SimpleTensor<int8_t> permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
+template SimpleTensor<uint8_t> permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
+template SimpleTensor<int16_t> permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
template SimpleTensor<uint16_t> permute(const SimpleTensor<uint16_t> &src, PermutationVector perm);
template SimpleTensor<uint32_t> permute(const SimpleTensor<uint32_t> &src, PermutationVector perm);
-template SimpleTensor<float> permute(const SimpleTensor<float> &src, PermutationVector perm);
-template SimpleTensor<half> permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<float> permute(const SimpleTensor<float> &src, PermutationVector perm);
+template SimpleTensor<half> permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<bfloat16> permute(const SimpleTensor<bfloat16> &src, PermutationVector perm);
} // namespace reference
} // namespace validation
} // namespace test
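The permute reference above walks every source element, remaps its coordinates through the PermutationVector, and writes the value at the permuted position. A self-contained 3-D sketch of that remapping, assuming the convention of the helper used above (destination dimension perm[i] takes source dimension i):

    #include <array>
    #include <cstddef>
    #include <vector>

    // index2coord/coord2index are folded into the loops; extents are {x, y, z}.
    std::vector<float> permute3(const std::vector<float> &src,
                                std::array<std::size_t, 3> shape,
                                std::array<std::size_t, 3> perm)
    {
        std::array<std::size_t, 3> dst_shape{};
        for (std::size_t i = 0; i < 3; ++i)
        {
            dst_shape[perm[i]] = shape[i];
        }
        std::vector<float> dst(src.size());
        for (std::size_t x = 0; x < shape[0]; ++x)
        {
            for (std::size_t y = 0; y < shape[1]; ++y)
            {
                for (std::size_t z = 0; z < shape[2]; ++z)
                {
                    const std::array<std::size_t, 3> sc{x, y, z};
                    std::array<std::size_t, 3>       dc{};
                    for (std::size_t i = 0; i < 3; ++i)
                    {
                        dc[perm[i]] = sc[i]; // remap coordinates
                    }
                    const std::size_t src_idx = sc[0] + shape[0] * (sc[1] + shape[1] * sc[2]);
                    const std::size_t dst_idx = dc[0] + dst_shape[0] * (dc[1] + dst_shape[1] * dc[2]);
                    dst[dst_idx] = src[src_idx];
                }
            }
        }
        return dst;
    }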
diff --git a/tests/validation/reference/ReshapeLayer.cpp b/tests/validation/reference/ReshapeLayer.cpp
index daea001be6..30a58dd65b 100644
--- a/tests/validation/reference/ReshapeLayer.cpp
+++ b/tests/validation/reference/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,14 +44,15 @@ SimpleTensor<T> reshape_layer(const SimpleTensor<T> &src, const TensorShape &out
return dst;
}
-template SimpleTensor<uint8_t> reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int8_t> reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<uint8_t> reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int8_t> reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
template SimpleTensor<uint16_t> reshape_layer(const SimpleTensor<uint16_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int16_t> reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int16_t> reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
template SimpleTensor<uint32_t> reshape_layer(const SimpleTensor<uint32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int32_t> reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<half> reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
-template SimpleTensor<float> reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
+template SimpleTensor<int32_t> reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
+template SimpleTensor<half> reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
+template SimpleTensor<float> reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
+template SimpleTensor<bfloat16> reshape_layer(const SimpleTensor<bfloat16> &src, const TensorShape &output_shape);
/** [ReshapeLayer] **/
} // namespace reference
} // namespace validation