From f5d51f33308dd5754a9d6a4633b2b5501ed1ada8 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Tue, 17 Aug 2021 16:09:10 +0100 Subject: Retain weights in ClGemm when reconfiguring the operator with retention - Reduces the size of the WeightsRetention tests - Ensure the weights are retained and only the Src,Dst are updated Resolves: COMPMID-4775 Signed-off-by: Georgios Pinitas Change-Id: If12daa4bd4bf89ec28faa743fb7291895cbe7b7c Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6121 Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice Comments-Addressed: Arm Jenkins --- .../fixtures/UNIT/WeightsRetentionFixture.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'tests/validation/fixtures/UNIT') diff --git a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h index af9f776ebc..f5e6071340 100644 --- a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h +++ b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h @@ -74,10 +74,10 @@ protected: TensorType compute_target() { // Create tensors - TensorType w1 = create_tensor(TensorShape(180000U, 150U), DataType::F32, 1); - TensorType b1 = create_tensor(TensorShape(150U), DataType::F32, 1); - TensorType src = create_tensor(TensorShape(1U, 150U, 1200U, _max_batches), DataType::F32, 1); - TensorType dst = create_tensor(TensorShape(150U, _max_batches), DataType::F32, 1); + TensorType w1 = create_tensor(TensorShape(6000U, 15U), DataType::F32, 1); + TensorType b1 = create_tensor(TensorShape(15U), DataType::F32, 1); + TensorType src = create_tensor(TensorShape(1U, 15U, 400U, _max_batches), DataType::F32, 1); + TensorType dst = create_tensor(TensorShape(15U, _max_batches), DataType::F32, 1); // Create and configure function FullyConnectedFunction fc_layer_1; @@ -105,9 +105,9 @@ protected: int diff = _max_batches - _cur_batches; auto new_src_padding = PaddingSize(src_padding.top, src_padding.right, 
src_padding.bottom + diff, src_padding.left); auto new_dst_padding = PaddingSize(dst_padding.top, dst_padding.right, dst_padding.bottom + diff, dst_padding.left); - src.allocator()->info().set_tensor_shape(TensorShape(1U, 150U, 1200U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); + src.allocator()->info().set_tensor_shape(TensorShape(1U, 15U, 400U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding); src.allocator()->info().set_is_resizable(false); - dst.allocator()->info().set_tensor_shape(TensorShape(150U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); + dst.allocator()->info().set_tensor_shape(TensorShape(15U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding); dst.allocator()->info().set_is_resizable(false); // Configure FC info @@ -129,16 +129,16 @@ protected: SimpleTensor compute_reference() { // Create reference - SimpleTensor w1{ TensorShape(180000U, 150U), DataType::F32 }; - SimpleTensor b1{ TensorShape(150U), DataType::F32 }; - SimpleTensor src{ TensorShape(1U, 150U, 1200U, _cur_batches), DataType::F32 }; + SimpleTensor w1{ TensorShape(6000U, 15U), DataType::F32 }; + SimpleTensor b1{ TensorShape(15U), DataType::F32 }; + SimpleTensor src{ TensorShape(1U, 15U, 400U, _cur_batches), DataType::F32 }; // Fill reference fill(src, 5); fill(w1, 1); fill(b1, 2); - return reference::fully_connected_layer(src, w1, b1, TensorShape(150U, _cur_batches)); + return reference::fully_connected_layer(src, w1, b1, TensorShape(15U, _cur_batches)); } protected: -- cgit v1.2.1