diff options
author | Moritz Pflanzer <moritz.pflanzer@arm.com> | 2017-08-31 17:10:18 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
commit | 95643d83d8f4d0431b3983f771ca749963f6a966 (patch) | |
tree | 05b890a7fcaca77e10c665ffb9f883aee040479c /tests/networks_new | |
parent | a230e0aa44409ea33085634764922ba617a15394 (diff) | |
download | ComputeLibrary-95643d83d8f4d0431b3983f771ca749963f6a966.tar.gz |
COMPMID-417: Fix benchmark tests
Change-Id: I4c4786c38fd3381015abbb9f1ef5612c712594b6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86019
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Diffstat (limited to 'tests/networks_new')
-rw-r--r-- | tests/networks_new/AlexNetNetwork.h | 29 |
1 file changed, 17 insertions, 12 deletions
diff --git a/tests/networks_new/AlexNetNetwork.h b/tests/networks_new/AlexNetNetwork.h
index b3a719671d..11171d6ec2 100644
--- a/tests/networks_new/AlexNetNetwork.h
+++ b/tests/networks_new/AlexNetNetwork.h
@@ -97,10 +97,15 @@ public:
         }
         else
         {
-            const unsigned int data_type_size = 16 / arm_compute::data_size_from_type(_data_type);
+            auto reshape = [&](unsigned int width, unsigned int height) -> TensorShape
+            {
+                const int interleave_width = 16 / arm_compute::data_size_from_type(_data_type);
+
+                return TensorShape{ width * interleave_width, static_cast<unsigned int>(std::ceil(static_cast<float>(height) / interleave_width)) };
+            };

             // Create tensor for the reshaped weights
-            w[0].allocator()->init(TensorInfo(TensorShape(366U * data_type_size, 96U / data_type_size), 1, _data_type, _fixed_point_position));
+            w[0].allocator()->init(TensorInfo(reshape(366U, 96U), 1, _data_type, _fixed_point_position));

             // Configure the direct convolution's weights. Direct convolution doesn't need reshape weights
             if(!_is_direct_conv)
@@ -111,13 +116,13 @@ public:
                 auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
                 auto w51_tensor = std::unique_ptr<TensorType>(new TensorType());
                 auto w52_tensor = std::unique_ptr<TensorType>(new TensorType());
-                w21_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
-                w22_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
-                w41_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
-                w42_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
-                w51_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
-                w52_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
-                w[2].allocator()->init(TensorInfo(TensorShape(2560U * data_type_size, 384U / data_type_size), 1, _data_type, _fixed_point_position));
+                w21_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U), 1, _data_type, _fixed_point_position));
+                w22_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U), 1, _data_type, _fixed_point_position));
+                w41_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U), 1, _data_type, _fixed_point_position));
+                w42_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U), 1, _data_type, _fixed_point_position));
+                w51_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U), 1, _data_type, _fixed_point_position));
+                w52_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U), 1, _data_type, _fixed_point_position));
+                w[2].allocator()->init(TensorInfo(reshape(2560U, 384U), 1, _data_type, _fixed_point_position));
                 w21 = std::move(w21_tensor);
                 w22 = std::move(w22_tensor);
                 w41 = std::move(w41_tensor);
@@ -157,9 +162,9 @@ public:

         if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
         {
-            w[5].allocator()->init(TensorInfo(TensorShape(9216U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
-            w[6].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
-            w[7].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 1000U / data_type_size), 1, _data_type, _fixed_point_position));
+            w[5].allocator()->init(TensorInfo(reshape(9216U, 4096U), 1, _data_type, _fixed_point_position));
+            w[6].allocator()->init(TensorInfo(reshape(4096U, 4096U), 1, _data_type, _fixed_point_position));
+            w[7].allocator()->init(TensorInfo(reshape(4096U, 1000U), 1, _data_type, _fixed_point_position));
         }
         else
         {