diff options
author | Giorgio Arena <giorgio.arena@arm.com> | 2021-09-24 14:04:27 +0100 |
---|---|---|
committer | Giorgio Arena <giorgio.arena@arm.com> | 2021-09-29 10:31:08 +0000 |
commit | 63e0beb9fb9646407d123e830165546e9129e95d (patch) | |
tree | 9bfe80e8d853327a82f9f622d89c3b43df0400f4 /src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp | |
parent | b1ba1e33f2b03b211f561123559c24517c0e5865 (diff) | |
download | ComputeLibrary-63e0beb9fb9646407d123e830165546e9129e95d.tar.gz |
Add support for non-constant weights and biases in CpuFullyConnected
Changing the approach for specifying that weights and biases tensors are
non-constant by making it a member of TensorInfo rather than an option
of the functions.
Resolves: COMPMID-4222, COMPMID-4811
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I9b0081ccbcf8271ce029ba6755563d64c59e1d32
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6313
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp | 8 |
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
index d4348beabf..f0b4e5db9e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
@@ -201,11 +201,11 @@ public:
         return _buffer_per_multi * _args._nmulti * sizeof(To) + get_col_sum_size();
     }

-    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
+    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
         // Column sums go on the front of the pretransposed buffer in requantized cases.
         // We could optimize here in case we don't actually need to sum the columns, but this code is only run on setup.
         if (std::is_same<OutputStage, Requantize32>::value) {
-            col_bias = reinterpret_cast<int32_t *>(buffer);
+            col_bias = reinterpret_cast<int32_t *>(in_buffer);

             Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);

@@ -213,6 +213,10 @@ public:
                 compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize, B + (i * B_multi_stride), ldb, col_bias + (i * _args._Nsize), _args._Ksize, i, 0);
             }
         }
+    }
+
+    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
+        requantize_bias(buffer, B, ldb, B_multi_stride);

         // The actual transposed buffer goes after the column sums (if any)
         uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);