From 7d66a8e3f603f2cd363f04a750847e3f9eabdfd4 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Tue, 17 Jul 2018 12:28:42 +0100
Subject: COMPMID-1386: Add support for converting weights for CL.

Change-Id: I62e3ead903366baeeb1488f233a9b8b0c388c9de
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/140403
Tested-by: Jenkins
Reviewed-by: Giorgio Arena
Reviewed-by: Anthony Barbier
---
 tests/validation/fixtures/FullyConnectedLayerFixture.h | 7 ++++++-
 tests/validation/fixtures/UNIT/MemoryManagerFixture.h  | 14 +++++++++++---
 2 files changed, 17 insertions(+), 4 deletions(-)

(limited to 'tests/validation/fixtures')

diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 895e43b735..18321480f8 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -130,9 +130,14 @@ protected:
         TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
         TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);

+        // Create Fully Connected layer info
+        FullyConnectedLayerInfo fc_info;
+        fc_info.transpose_weights    = transpose_weights;
+        fc_info.are_weights_reshaped = !reshape_weights;
+
         // Create and configure function.
         FunctionType fc;
-        fc.configure(&src, &weights, &bias, &dst, transpose_weights, !reshape_weights);
+        fc.configure(&src, &weights, &bias, &dst, fc_info);

         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
index 21ad42bf77..d8e2b0b427 100644
--- a/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
+++ b/tests/validation/fixtures/UNIT/MemoryManagerFixture.h
@@ -239,9 +239,13 @@ protected:
         dst.allocator()->info().set_tensor_shape(TensorShape(24U, _cur_batches)).set_is_resizable(true).extend_padding(new_dst_padding);
         dst.allocator()->info().set_is_resizable(false);

+        // Configure FC info
+        FullyConnectedLayerInfo fc_info;
+        fc_info.retain_internal_weights = true;
+
         // Configure functions (2nd iteration)
-        fc_layer_1.configure(&src, &w1, &b1, &fc1, true, false, true);
-        fc_layer_2.configure(&fc1, &w2, &b2, &dst, true, false, true);
+        fc_layer_1.configure(&src, &w1, &b1, &fc1, fc_info);
+        fc_layer_2.configure(&fc1, &w2, &b2, &dst, fc_info);

         // Fill tensors (2nd iteration)
         fill(AccessorType(src), 5);
@@ -357,6 +361,10 @@ protected:
         // Get padding requirements
         auto fc_padding = fc.allocator()->info().padding();

+        // Configure FC info
+        FullyConnectedLayerInfo fc_info;
+        fc_info.retain_internal_weights = true;
+
         // Run rest iterations
         for(int i = _max_batches; i >= static_cast<int>(_cur_batches); --i)
         {
@@ -368,7 +376,7 @@
             dst.allocator()->info().set_tensor_shape(TensorShape(8U, i));

             // Configure functions
-            fc_layer.configure(&src, &w, &b, &fc, true, false, true);
+            fc_layer.configure(&src, &w, &b, &fc, fc_info);
             smx_layer.configure(&fc, &dst);

             // Fill tensors
-- 
cgit v1.2.1
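
Note: after this change the fully connected configure() call takes a FullyConnectedLayerInfo instead of separate boolean flags. A minimal usage sketch for the CL backend follows; tensor shapes and the scheduler/allocation boilerplate are illustrative assumptions and not part of this patch, only the fc_info fields visible in the diff are taken from it.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the OpenCL scheduler before configuring any CL function.
        CLScheduler::get().default_init();

        // Illustrative shapes: 128 input features mapped to 64 outputs.
        // The exact weights layout depends on fc_info.transpose_weights.
        CLTensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));

        // Describe how the weights should be handled, instead of passing booleans.
        FullyConnectedLayerInfo fc_info;
        fc_info.transpose_weights       = true;  // weights still need transposing
        fc_info.are_weights_reshaped    = false; // weights have not been reshaped up front
        fc_info.retain_internal_weights = false; // do not reuse weights from an earlier configure()

        CLFullyConnectedLayer fc;
        fc.configure(&src, &weights, &bias, &dst, fc_info);

        // Allocate backing memory and run once (tensors left unfilled in this sketch).
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        fc.run();
        CLScheduler::get().sync();

        return 0;
    }

The unit fixtures above set retain_internal_weights = true because they reconfigure the same function objects across iterations and want the already-imported weights kept, whereas a one-shot configuration like this sketch leaves it at false.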