aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2021-08-17 16:09:10 +0100
committerGiorgio Arena <giorgio.arena@arm.com>2021-08-18 09:42:38 +0000
commitf5d51f33308dd5754a9d6a4633b2b5501ed1ada8 (patch)
treea53eeece8fa5db6e633d143494784cad0e539092 /tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
parent8273676c64a032794c1eba06e43dec8cd93b2314 (diff)
downloadComputeLibrary-f5d51f33308dd5754a9d6a4633b2b5501ed1ada8.tar.gz
Retain weights in ClGemm when reconfiguring the operator with retention
- Reduces the size of the WeightRetentions tests - Ensure the weights are retained and that only the Src, Dst are updated Resolves: COMPMID-4775 Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com> Change-Id: If12daa4bd4bf89ec28faa743fb7291895cbe7b7c Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6121 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures/UNIT/WeightsRetentionFixture.h')
-rw-r--r--tests/validation/fixtures/UNIT/WeightsRetentionFixture.h20
1 file changed, 10 insertions, 10 deletions
diff --git a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
index af9f776ebc..f5e6071340 100644
--- a/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
+++ b/tests/validation/fixtures/UNIT/WeightsRetentionFixture.h
@@ -74,10 +74,10 @@ protected:
TensorType compute_target()
{
// Create tensors
- TensorType w1 = create_tensor<TensorType>(TensorShape(180000U, 150U), DataType::F32, 1);
- TensorType b1 = create_tensor<TensorType>(TensorShape(150U), DataType::F32, 1);
- TensorType src = create_tensor<TensorType>(TensorShape(1U, 150U, 1200U, _max_batches), DataType::F32, 1);
- TensorType dst = create_tensor<TensorType>(TensorShape(150U, _max_batches), DataType::F32, 1);
+ TensorType w1 = create_tensor<TensorType>(TensorShape(6000U, 15U), DataType::F32, 1);
+ TensorType b1 = create_tensor<TensorType>(TensorShape(15U), DataType::F32, 1);
+ TensorType src = create_tensor<TensorType>(TensorShape(1U, 15U, 400U, _max_batches), DataType::F32, 1);
+ TensorType dst = create_tensor<TensorType>(TensorShape(15U, _max_batches), DataType::F32, 1);
// Create and configure function
FullyConnectedFunction fc_layer_1;
@@ -105,9 +105,9 @@ protected:
int diff = _max_batches - _cur_batches;
auto new_src_padding = PaddingSize(src_padding.top, src_padding.right, src_padding.bottom + diff, src_padding.left);
auto new_dst_padding = PaddingSize(dst_padding.top, dst_padding.right, dst_padding.bottom + diff, dst_padding.left);
- src.allocator()->info().set_tensor_shape(TensorShape(1U, 150U, 1200U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding);
+ src.allocator()->info().set_tensor_shape(TensorShape(1U, 15U, 400U, _cur_batches)).set_is_resizable(true).extend_padding(new_src_padding);
src.allocator()->info().set_is_resizable(false);
- dst.allocator()->info().set_tensor_shape(TensorShape(150U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
+ dst.allocator()->info().set_tensor_shape(TensorShape(15U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
dst.allocator()->info().set_is_resizable(false);
// Configure FC info
@@ -129,16 +129,16 @@ protected:
SimpleTensor<T> compute_reference()
{
// Create reference
- SimpleTensor<T> w1{ TensorShape(180000U, 150U), DataType::F32 };
- SimpleTensor<T> b1{ TensorShape(150U), DataType::F32 };
- SimpleTensor<T> src{ TensorShape(1U, 150U, 1200U, _cur_batches), DataType::F32 };
+ SimpleTensor<T> w1{ TensorShape(6000U, 15U), DataType::F32 };
+ SimpleTensor<T> b1{ TensorShape(15U), DataType::F32 };
+ SimpleTensor<T> src{ TensorShape(1U, 15U, 400U, _cur_batches), DataType::F32 };
// Fill reference
fill(src, 5);
fill(w1, 1);
fill(b1, 2);
- return reference::fully_connected_layer(src, w1, b1, TensorShape(150U, _cur_batches));
+ return reference::fully_connected_layer(src, w1, b1, TensorShape(15U, _cur_batches));
}
protected: