Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/MatMul.cpp            | 182
-rw-r--r--  tests/validation/fixtures/MatMulFixture.h |  60
2 files changed, 167 insertions(+), 75 deletions(-)
diff --git a/tests/validation/CL/MatMul.cpp b/tests/validation/CL/MatMul.cpp
index 6364b16200..5a262a8e78 100644
--- a/tests/validation/CL/MatMul.cpp
+++ b/tests/validation/CL/MatMul.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/runtime/CL/functions/CLMatMul.h"
#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ActivationFunctionsDataset.h"
#include "tests/framework/DatasetModes.h"
#include "tests/framework/Macros.h"
#include "tests/framework/TestCase.h"
@@ -44,11 +45,13 @@ namespace validation
{
namespace
{
-RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for fp32 data type */
-constexpr float abs_tolerance_f32(0.0001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for fp32 data type in case using relative tolerance fails because of small values */
-constexpr float abs_tolerance_f16(0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for fp16 data type in case using relative tolerance fails because of small values */
-RelativeTolerance<half_float::half> tolerance_f16(half(0.01)); /**< Tolerance value for comparing reference's output against implementation's output for fp16 data type */
-constexpr AbsoluteTolerance<uint8_t> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for fp32 data type */
+constexpr float abs_tolerance_f32(
+ 0.0001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for fp32 data type in case using relative tolerance fails because of small values */
+constexpr float abs_tolerance_f16(
+ 0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for fp16 data type in case using relative tolerance fails because of small values */
+RelativeTolerance<half_float::half> tolerance_f16(half(0.01)); /**< Tolerance value for comparing reference's output against implementation's output for fp16 data type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
} // namespace
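
For context (editor's note, not part of the patch): the relative/absolute tolerance pairs exist because relative error is uninformative for outputs near zero, so the comparison falls back to an absolute bound there. A minimal sketch of that fallback logic, assuming a simple comparator rather than the framework's actual one:

#include <cmath>

// Sketch: accept a value if it is within the absolute bound (covers
// near-zero outputs), otherwise apply the relative criterion.
bool values_match(float target, float reference, float rel_tol, float abs_tol)
{
    const float abs_err = std::fabs(target - reference);
    return abs_err <= abs_tol || abs_err <= rel_tol * std::fabs(reference);
}
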
template <typename T>
@@ -57,25 +60,71 @@ using CLMatMulFixture = MatMulValidationFixture<CLTensor, CLAccessor, CLMatMul,
template <typename T>
using CLQuantizedMatMulFixture = QuantizedMatMulValidationFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
+template <typename T>
+using CLMatMulActivationFixture = MatMulValidationWithActivationFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
+
+template <typename T>
+using CLMatMulActivationAlphaBetaFixture = MatMulValidationWithActivationAlphaBetaFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
+
+template <typename T>
+using CLQuantizedMatMulActivationFixture = QuantizedMatMulValidationWithActivationFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
+
+/* The main activation functions that MatMul is expected to support */
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)
+});
+
+const auto ActivationFunctionsQuantizedDataset = concat(concat(concat(
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo()),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f)));
+
+/* Larger activation-functions dataset, used in some nightly tests. */
+const auto AllActivationsDataset = combine(datasets::ActivationFunctions(), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+const auto AllQuantizedActivationsDataset = combine(concat(datasets::ActivationFunctionsQuantized(),
+ framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::HARD_SWISH,
+ ActivationLayerInfo::ActivationFunction::LEAKY_RELU
+ })),
+ framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
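
A note on the dataset combinators used throughout (editor's note): framework::dataset::make builds a named axis, combine() takes the cartesian product of two datasets, and concat() appends one dataset to another. AllActivationsDataset therefore runs every function in datasets::ActivationFunctions() against each AlphaBeta value. A plain-C++ sketch of those semantics, not the framework types:

#include <string>
#include <utility>
#include <vector>

// combine() behaves like a cartesian product: every element of the first
// dataset is paired with every element of the second, one test case each.
std::vector<std::pair<std::string, float>> combine_like(const std::vector<std::string> &funcs,
                                                        const std::vector<float>       &alpha_betas)
{
    std::vector<std::pair<std::string, float>> out;
    for (const auto &f : funcs)
    {
        for (const float ab : alpha_betas)
        {
            out.emplace_back(f, ab);
        }
    }
    return out;
}
// concat() simply appends datasets: {IDENTITY, RELU} ++ {TANH} -> {IDENTITY, RELU, TANH}.
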
TEST_SUITE(CL)
TEST_SUITE(MatMul)
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallMatMulDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulActivationFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLMatMulActivationFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunAllActivations, CLMatMulActivationAlphaBetaFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SmallerMatMulDataset(),
+ framework::dataset::make("TransposeA", { false })),
+ framework::dataset::make("TransposeB", { true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ AllActivationsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
@@ -85,19 +134,21 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLMatMulActivationFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLMatMulActivationFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
@@ -110,32 +161,30 @@ TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(
- datasets::SmallMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
- framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
- framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })),
- framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })),
- framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) }))
-)
+ datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ ActivationFunctionsQuantizedDataset),
+ framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
+ framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })),
+ framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })),
+ framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_quant);
}
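
As a reading aid (editor's note): QuantizationInfo(scale, offset) above encodes the standard affine mapping for QASYMM8 data, so an LhsQInfo of QuantizationInfo(1.f / 50, 1) means scale 0.02 with zero point 1. A sketch of the mapping:

#include <cstdint>

// Affine dequantization for an asymmetric 8-bit tensor:
//   real = scale * (quantized - offset)
float dequantize_qasymm8(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}
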
FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(
- datasets::LargeMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
- framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
- framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
- framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
- framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) }))
-)
+ datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ ActivationFunctionsQuantizedDataset),
+ framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
+ framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
+ framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
+ framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_quant);
@@ -146,32 +195,45 @@ TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizedMatMulFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(
- datasets::SmallMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
- framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
- framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })),
- framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })),
- framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) }))
-)
+ datasets::SmallMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ ActivationFunctionsQuantizedDataset),
+ framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
+ framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })),
+ framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })),
+ framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_quant);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizedMatMulFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(
- datasets::LargeMatMulDataset(),
- framework::dataset::make("TransposeA", { false, true })),
- framework::dataset::make("TransposeB", { false, true })),
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
- framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
- framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
- framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
- framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
- framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) }))
-)
+ datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false, true })),
+ framework::dataset::make("TransposeB", { false, true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ ActivationFunctionsQuantizedDataset),
+ framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
+ framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
+ framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
+ framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_quant);
+}
+
+FIXTURE_DATA_TEST_CASE(RunAllActivations, CLQuantizedMatMulActivationFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(
+ datasets::LargeMatMulDataset(),
+ framework::dataset::make("TransposeA", { false })),
+ framework::dataset::make("TransposeB", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ AllQuantizedActivationsDataset),
+ framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
+ framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
+ framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
+ framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_quant);
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
index 2f94c1f9d2..3e4cac5e34 100644
--- a/tests/validation/fixtures/MatMulFixture.h
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -112,14 +112,14 @@ protected:
// Configure MatMulInfo class
MatMulInfo mm_info;
- mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b).fused_activation(act_info);
+ mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
// Ensure values are dynamic
a.info()->set_are_values_constant(false);
b.info()->set_are_values_constant(false);
// Configure operator
- matmul.configure(&a, &b, &dst, mm_info, settings);
+ matmul.configure(&a, &b, &dst, mm_info, settings, act_info);
// Assertions
ARM_COMPUTE_ASSERT(a.info()->is_resizable());
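
The fixture change above is the heart of the patch: the fused activation moves out of MatMulInfo and becomes an explicit argument to configure(). A minimal sketch of the updated call site, with names mirroring the fixture:

// Before: mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b).fused_activation(act_info);
//         matmul.configure(&a, &b, &dst, mm_info, settings);
// After:
MatMulInfo mm_info;
mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
matmul.configure(&a, &b, &dst, mm_info, settings, act_info); // activation passed directly
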
@@ -162,8 +162,8 @@ protected:
}
template <typename TT>
- typename std::enable_if<!std::is_integral<TT>::value, SimpleTensor<TT>>::type
- compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
+ typename std::enable_if < !std::is_integral<TT>::value, SimpleTensor<TT >>::type
+ compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
{
ARM_COMPUTE_UNUSED(o_qinfo);
@@ -172,7 +172,7 @@ protected:
template <typename TT>
typename std::enable_if<std::is_integral<TT>::value, SimpleTensor<TT>>::type
- compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
+ compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
{
ARM_COMPUTE_UNUSED(alpha, beta);
@@ -183,18 +183,18 @@ protected:
const auto multiplier = aq.scale * bq.scale / oq.scale;
int32_t output_multiplier = 0;
- int32_t output_shift = 0;
+ int32_t output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
std::vector<int32_t> output_multipliers{ output_multiplier };
std::vector<int32_t> output_shifts{ output_shift };
// The lhs and rhs offsets are negated here to keep the reference aligned with the function implementation, where the lhs and rhs offsets are also negated.
const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(
- a, b, c.shape(), -aq.offset, -bq.offset);
+ a, b, c.shape(), -aq.offset, -bq.offset);
auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TT>(
- tmp, output_multipliers, output_shifts, oq.offset,
- std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ tmp, output_multipliers, output_shifts, oq.offset,
+ std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
output.quantization_info(o_qinfo);
return output;
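
To make the requantization above concrete (a float-level sketch only; the reference uses the fixed-point multiplier/shift pair produced by calculate_quantized_multiplier): an int32 accumulator is rescaled by aq.scale * bq.scale / oq.scale, offset, and saturated into the output type's range:

#include <algorithm>
#include <cmath>
#include <cstdint>

// dst = clamp(round(acc * a_scale * b_scale / o_scale) + o_offset)
int8_t requantize_int8(int32_t acc, float a_scale, float b_scale, float o_scale, int32_t o_offset)
{
    const float   multiplier = a_scale * b_scale / o_scale;
    const int32_t rescaled   = static_cast<int32_t>(std::lround(acc * multiplier)) + o_offset;
    return static_cast<int8_t>(std::max(-128, std::min(127, rescaled)));
}
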
@@ -280,6 +280,30 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class QuantizedMatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
+ QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ {
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
@@ -291,24 +315,30 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationWithActivationAlphaBetaFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+ ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
}
};
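
In other words (editor's note), the alpha/beta fixture forwards the same scalar to both parameters of ActivationLayerInfo. For instance, with the TANH entry from AllActivationsDataset and alpha_beta = 0.5f:

ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::TANH, 0.5f, 0.5f);
// For TANH the library computes f(x) = alpha * tanh(beta * x), here 0.5f * tanh(0.5f * x).
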
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class QuantizedMatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class QuantizedMatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
+ float alpha_beta, int num_extra_runs,
+ QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
{
- MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(), a_qinfo, b_qinfo, o_qinfo);
+ ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
+ MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+ a_qinfo, b_qinfo, o_qinfo);
}
};
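
Taken together, the new fixtures validate out = act(matmul(A, B)) end to end against the CL backend. A tiny self-contained reference of the fused behaviour under test, sketched here for the float RELU case:

#include <algorithm>
#include <vector>

// out[m][n] = relu(sum_k a[m][k] * b[k][n]); row-major MxK and KxN inputs.
std::vector<float> matmul_relu_ref(const std::vector<float> &a, const std::vector<float> &b,
                                   int M, int K, int N)
{
    std::vector<float> out(static_cast<size_t>(M) * N);
    for (int m = 0; m < M; ++m)
    {
        for (int n = 0; n < N; ++n)
        {
            float acc = 0.f;
            for (int k = 0; k < K; ++k)
            {
                acc += a[m * K + k] * b[k * N + n];
            }
            out[m * N + n] = std::max(0.f, acc); // fused RELU
        }
    }
    return out;
}
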