author    | Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> | 2023-03-23 22:21:31 +0000
committer | Mohmun02 <MohammedSuhail.Munshi@arm.com> | 2023-04-13 09:24:52 +0000
commit    | a1b1e41bb261f5613f443fed7071936a360686ed (patch)
tree      | eff2978a682fb24c8078df9c6c796fde51074255 /tests/validation/fixtures
parent    | 8b7f42aa0e76a65a4ffa46ee875df6a6220695ae (diff)
download  | ComputeLibrary-a1b1e41bb261f5613f443fed7071936a360686ed.tar.gz
Implement MatMul Function and Operator with Floating Point support for CPU
- Implements the MatMul function and operator for the floating point data types FP16 and FP32
- Includes support for transposing dynamic tensors prior to matrix multiplication.
- Adds tests for 2D/3D/4D+ tensors in MatMul with the F32/F16 data types (covering all combinations of transposed/non-transposed tensors)
- Updates the fixture to allow testing of fused activation in MatMul
- Adds tests for MatMul with and without fused activation (see the usage sketch below)
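For illustration, a minimal sketch of how the new CPU operator might be driven, assuming the runtime entry point is NEMatMul together with the MatMulInfo and CpuMatMulSettings types visible in the diff below; the header paths, shapes, and activation choice are illustrative assumptions, not part of this change:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h" // assumed header for the new function
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative 2D shapes: A = (K=32, M=64), B = (N=16, K=32) -> dst = (N=16, M=64)
    Tensor a, b, dst;
    a.allocator()->init(TensorInfo(TensorShape(32U, 64U), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::F32));

    // MatMul operates on dynamic (non-constant) tensors, as the fixture below does
    a.info()->set_are_values_constant(false);
    b.info()->set_are_values_constant(false);

    // Optional fused activation, matching the fused_activation() setter in the diff
    MatMulInfo mm_info;
    mm_info.adj_lhs(false).adj_rhs(false).fused_activation(
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

    NEMatMul matmul;
    matmul.configure(&a, &b, &dst, mm_info, CpuMatMulSettings());

    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill a and b, then execute ...
    matmul.run();
    return 0;
}
```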
Resolved: [COMPMID-5898]
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: Iefa84b26dd723c9a51e6c3f91023152c6c31ace2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9411
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures')
-rw-r--r-- | tests/validation/fixtures/MatMulFixture.h | 147
1 file changed, 111 insertions, 36 deletions
diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h
index 1112dcb2fb..bb4a1cd7be 100644
--- a/tests/validation/fixtures/MatMulFixture.h
+++ b/tests/validation/fixtures/MatMulFixture.h
@@ -26,35 +26,38 @@
 #include "arm_compute/core/Types.h"
 #include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ActivationLayer.h"
 #include "tests/validation/reference/GEMM.h"
 #include "tests/validation/reference/Permute.h"
-#include "tests/validation/reference/Permute.h"
 #include "tests/validation/reference/ReshapeLayer.h"
 
 #include <random>
+
 namespace arm_compute
 {
 namespace test
 {
 namespace validation
 {
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class MatMulValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulGenericValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
+               Settings settings)
     {
-        // For brevity, the input shapes are assumed to be not-transposed for both Lhs and Rhs matrices.
-        if(pretranspose_a)
+        // For brevity, the input shapes are assumed to be not-transposed for both a and b matrices.
+        if(transpose_a)
         {
             permute(shape_a, PermutationVector(1U, 0U));
         }
-        if(pretranspose_b)
+        if(transpose_b)
         {
             permute(shape_b, PermutationVector(1U, 0U));
         }
-        _target    = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type);
-        _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type);
+
+        _target    = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings);
+        _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info);
     }
 
 protected:
@@ -76,49 +79,84 @@ protected:
                 break;
             }
             default:
+            {
                 library->fill_tensor_uniform(tensor, i);
+            }
         }
     }
-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
+                              ActivationLayerInfo act_info, int num_extra_runs, const Settings &settings)
     {
         // 1. Create Classes and configure function
+        // ----------------------------------------------------
         // Create tensors
-        TensorType a   = create_tensor<TensorType>(shape_a, data_type, 1);
-        TensorType b   = create_tensor<TensorType>(shape_b, data_type, 1);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+        // Configure relevant classes and matmul function
+        TensorType a   = create_tensor<TensorType>(shape_a, data_type, 1);
+        TensorType b   = create_tensor<TensorType>(shape_b, data_type, 1);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+
         FunctionType matmul;
+
         // Configure MatMulInfo class
-        MatMulInfo info;
-        info.adj_lhs(pretranspose_a);
-        info.adj_rhs(pretranspose_b);
-        matmul.configure(&a, &b, &dst, info);
+        MatMulInfo mm_info;
+        mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b).fused_activation(act_info);
+
+        // Ensure values are dynamic
+        a.info()->set_are_values_constant(false);
+        b.info()->set_are_values_constant(false);
+
+        // Configure operator
+        matmul.configure(&a, &b, &dst, mm_info, settings);
+
         // Assertions
         ARM_COMPUTE_ASSERT(a.info()->is_resizable());
         ARM_COMPUTE_ASSERT(b.info()->is_resizable());
         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
        // Allocate tensors
         a.allocator()->allocate();
         b.allocator()->allocate();
         dst.allocator()->allocate();
+
         ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
         ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
         ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
-        // 2. Fill tensors and run once
-        // Fill tensors
-        fill(AccessorType(a), 0);
-        fill(AccessorType(b), 1);
-        matmul.run(); // First run
+        // For multiple runs.
+        for(int i = 0; i < num_extra_runs; i++)
+        {
+            // Stress dynamic tensors by running multiple times.
+            // --------------------------------------------------------
+            // Fill tensors with new seed
+            // Run function
+            const int seed_offset = num_extra_runs * 100;
+            fill(AccessorType(a), seed_offset);
+            fill(AccessorType(b), seed_offset + 1);
+
+            matmul.run();
+        }
+
+        // 2. Final Run for reference comparison
+        // --------------------------------------------------------
+        // Re-fill tensors same seed as reference run
+        // Compute MatMul operation
+        fill(AccessorType(a), 2);
+        fill(AccessorType(b), 3);
+
+        matmul.run();
 
         return dst;
     }
-    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+
+    SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
+                                      ActivationLayerInfo act_info)
     {
         // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D
         // This is necessary unless we choose to extend gemm reference for 5D+ tensors
         TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimW);
-        TensorShape a_shape_collapsed      = shape_a.collapsed_from(Window::DimW);
-        TensorShape b_shape_collapsed      = shape_b.collapsed_from(Window::DimW);
+        TensorShape a_shape_collapsed      = a_shape.collapsed_from(Window::DimW);
+        TensorShape b_shape_collapsed      = b_shape.collapsed_from(Window::DimW);
 
         // Create reference
         SimpleTensor<T> a{ a_shape_collapsed, data_type, 1 };
@@ -126,18 +164,19 @@ protected:
         SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 };
 
         // Fill reference
-        fill(a, 0);
-        fill(b, 1);
+        fill(a, 2);
+        fill(b, 3);
 
-        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_a is set to true, then A is assumed to be (B x K x M),
-           therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K)
-           in order to be able to call reference implementation that works with (B x M x K) input.
-           Similarly, if pretranspose_b is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */
+        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if transpose_a is set to true, then A is assumed to be (B x K x M),
+           therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K)
+           in order to be able to call reference implementation that works with (B x M x K) input.
+           Similarly, if transpose_b is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */
 
         // Define transposed shapes
         TensorShape a_transposed_shape(a.shape());
         a_transposed_shape.set(0, a.shape().y());
         a_transposed_shape.set(1, a.shape().x());
+
         TensorShape b_transposed_shape(b.shape());
         b_transposed_shape.set(0, b.shape().y());
         b_transposed_shape.set(1, b.shape().x());
@@ -147,13 +186,12 @@ protected:
         SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
 
         // pretranspose a if necessary
-        if(pretranspose_a)
+        if(transpose_a)
         {
             a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
         }
 
-        // pretranspose b if necessary
-        if(pretranspose_b)
+        if(transpose_b)
         {
             b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
         }
@@ -161,7 +199,8 @@ protected:
         // Setting beta to 0 will effectively disable C for the
         // computation of the reference: alpha * A * B + 0 * C
         // Use transposed tensors if boolean enabled else use original tensors
-        SimpleTensor<T> result = reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f);
+        SimpleTensor<T> result = reference::gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, 1.0f, 0.f);
+        result                 = reference::activation_layer<T>(result, act_info, QuantizationInfo());
 
         // We reshape the gemm output back if the tensor is high dimensional
         if(output_shape_collapsed != output_shape)
@@ -171,10 +210,46 @@ protected:
         return result;
     }
+
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type)
+    {
+        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0,
+                                                                                                   Settings());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info)
+    {
+        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
+class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
+    {
+        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
+    }
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* TESTS_VALIDATION_FIXTURES_MATMULFIXTURE */
+#endif /* ARM_COMPUTE_TEST_MATMUL_FIXTURE */
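The derived fixtures above are thin wrappers that forward to the generic fixture with defaulted activation, settings, and extra-run counts. As a rough sketch of how a backend suite might instantiate them (the dataset, tolerance, and combine nesting below are placeholders for illustration, not part of this diff):

```cpp
// Hypothetical wiring for the CPU backend: Tensor/Accessor/NEMatMul/CpuMatMulSettings
// fill the fixture's template parameters.
template <typename T>
using NEMatMulActivationFixture =
    MatMulValidationWithActivationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Placeholder dataset/tolerance names; a real suite combines a shape dataset with
// transpose flags, data type, and activation info, matching the fixture's setup().
FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulActivationFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                                                       framework::dataset::make("TransposeA", { false, true })),
                                               framework::dataset::make("TransposeB", { false, true })),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })))
{
    // The fixture fills _target (operator output) and _reference (GEMM + activation reference)
    validate(Accessor(_target), _reference, tolerance_fp32);
}
```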