From eed07aefb25c0a5cb292320d17832c6864593885 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Tue, 17 Aug 2021 16:09:10 +0100 Subject: Retain weights in ClGemm when reconfiguring the operator with retention - Reduces the size of the WeightRetentions tests - Ensure the weights are retained and only the Src,Dst are updated Resolves: COMPMID-4775 Signed-off-by: Georgios Pinitas Change-Id: If12daa4bd4bf89ec28faa743fb7291895cbe7b7c Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6121 Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice Comments-Addressed: Arm Jenkins --- src/runtime/CL/functions/CLFullyConnectedLayer.cpp | 16 ++++++++++++---- src/runtime/gpu/cl/operators/ClGemm.cpp | 1 + .../fixtures/UNIT/WeightsRetentionFixture.h | 20 ++++++++++---------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp index c719a667a7..6836c12022 100644 --- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp +++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp @@ -76,7 +76,7 @@ void CLFullyConnectedLayer::configure(const CLCompileContext &compile_context, c _impl->op = std::make_unique(); _impl->original_weights = weights; - _impl->is_prepared = false; + _impl->is_prepared = fc_info.retain_internal_weights; _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr) ? 
biases->info() : nullptr, output->info(), fc_info); @@ -85,9 +85,17 @@ void CLFullyConnectedLayer::configure(const CLCompileContext &compile_context, c _impl->weights_manager->manage(weights); } - _impl->aux_mem_req = _impl->op->workspace(); - _impl->run_pack = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } }; - _impl->workspace = manage_workspace(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->run_pack); + if(!_impl->is_prepared) + { + _impl->aux_mem_req = _impl->op->workspace(); + _impl->run_pack = { { ACL_SRC_0, input }, { ACL_SRC_1, weights }, { ACL_SRC_2, biases }, { ACL_DST, output } }; + _impl->workspace = manage_workspace(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->run_pack); + } + else + { + _impl->run_pack.add_tensor(ACL_SRC_0, input); + _impl->run_pack.add_tensor(ACL_DST, output); + } } Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, diff --git a/src/runtime/gpu/cl/operators/ClGemm.cpp b/src/runtime/gpu/cl/operators/ClGemm.cpp index 2792dc470d..59bbabba26 100644 --- a/src/runtime/gpu/cl/operators/ClGemm.cpp +++ b/src/runtime/gpu/cl/operators/ClGemm.cpp @@ -564,6 +564,7 @@ void ClGemm::configure(const CLCompileContext &compile_context, ITensorInfo *a, // Check if we need to reshape the matrix B only on the first run _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run(); + _is_prepared = gemm_info.retain_internal_weights(); bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); const unsigned int m = reinterpret_input_as_3d ? 
(a->dimension(1) * a->dimension(2)) : a->dimension(1); diff --git a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h index af9f776ebc..f5e6071340 100644 --- a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h +++ b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h @@ -74,10 +74,10 @@ protected: TensorType compute_target() { // Create tensors - TensorType w1 = create_tensor(TensorShape(180000U, 150U), DataType::F32, 1); - TensorType b1 = create_tensor(TensorShape(150U), DataType::F32, 1); - TensorType src = create_tensor(TensorShape(1U, 150U, 1200U, _max_batches), DataType::F32, 1); - TensorType dst = create_tensor(TensorShape(150U, _max_batches), DataType::F32, 1); + TensorType w1 = create_tensor(TensorShape(6000U, 15U), DataType::F32, 1); + TensorType b1 = create_tensor(TensorShape(15U), DataType::F32, 1); + TensorType src = create_tensor(TensorShape(1U, 15U, 400U, _max_batches), DataType::F32, 1); + TensorType dst = create_tensor(TensorShape(15U, _max_batches), DataType::F32, 1); // Create and configure function FullyConnectedFunction fc_layer_1; @@ -105,9 +105,9 @@ protected: int diff = _max_batches - _cur_batches; auto new_src_padding = PaddingSize(src_padding.top, src_padding.right, src_padding.bottom + diff, src_padding.left); auto new_dst_padding = PaddingSize(dst_padding.top, dst_padding.right, dst_padding.bottom + diff, dst_padding.left); - src.allocator()->info().set_tensor_shape(TensorShape(1U, 150U, 1200U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); + src.allocator()->info().set_tensor_shape(TensorShape(1U, 15U, 400U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); src.allocator()->info().set_is_resizable(false); - dst.allocator()->info().set_tensor_shape(TensorShape(150U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); + dst.allocator()->info().set_tensor_shape(TensorShape(15U, 
_cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); dst.allocator()->info().set_is_resizable(false); // Configure FC info @@ -129,16 +129,16 @@ protected: SimpleTensor compute_reference() { // Create reference - SimpleTensor w1{ TensorShape(180000U, 150U), DataType::F32 }; - SimpleTensor b1{ TensorShape(150U), DataType::F32 }; - SimpleTensor src{ TensorShape(1U, 150U, 1200U, _cur_batches), DataType::F32 }; + SimpleTensor w1{ TensorShape(6000U, 15U), DataType::F32 }; + SimpleTensor b1{ TensorShape(15U), DataType::F32 }; + SimpleTensor src{ TensorShape(1U, 15U, 400U, _cur_batches), DataType::F32 }; // Fill reference fill(src, 5); fill(w1, 1); fill(b1, 2); - return reference::fully_connected_layer(src, w1, b1, TensorShape(150U, _cur_batches)); + return reference::fully_connected_layer(src, w1, b1, TensorShape(15U, _cur_batches)); } protected: -- cgit v1.2.1