From afae756dfea0a8d4e9bcf05bd6293166ccf3b55f Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Tue, 19 Feb 2019 18:10:03 +0000 Subject: COMPMID-2007: Compilation failures with ndk16b. Resolves double brace initialization issues. Change-Id: Ic9319d4abc1d6428cefabc18be1c176bea7607dc Signed-off-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/729 Reviewed-by: Michele Di Giorgio Reviewed-by: Michalis Spyrou Tested-by: Arm Jenkins --- .../NEON/kernels/NEElementwiseOperationKernel.cpp | 30 ++++++---- src/core/NEON/kernels/NEPermuteKernel.cpp | 64 ++++++++++++---------- .../NEON/kernels/NEReductionOperationKernel.cpp | 28 ++++++---- .../CPP/functions/CPPDetectionOutputLayer.cpp | 32 +++++++---- tests/datasets/PriorBoxLayerDataset.h | 6 +- tests/validation/CL/BoundingBoxTransform.cpp | 13 +++-- tests/validation/reference/PadLayer.cpp | 6 +- 7 files changed, 102 insertions(+), 77 deletions(-) diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp index 789ef5c2b0..aa458c2119 100644 --- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp +++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp @@ -79,10 +79,12 @@ void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32 { int32x4x4_t out = { - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), - vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), + { + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)), + vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)), + } }; store_quantized(output_ptr, out); } @@ -185,10 +187,12 @@ inline float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32 { float32x4x4_t out = { - elementwise_arithm_op(a.val[0], b.val[0]), - elementwise_arithm_op(a.val[1], b.val[1]), - elementwise_arithm_op(a.val[2], b.val[2]), - elementwise_arithm_op(a.val[3], b.val[3]), + { + elementwise_arithm_op(a.val[0], b.val[0]), + elementwise_arithm_op(a.val[1], b.val[1]), + elementwise_arithm_op(a.val[2], b.val[2]), + elementwise_arithm_op(a.val[3], b.val[3]), + } }; return out; } @@ -275,10 +279,12 @@ inline uint32x4x4_t elementwise_comp_op(const float32x4x4_t &a, const float32x4x { uint32x4x4_t out = { - elementwise_comp_op(a.val[0], b.val[0]), - elementwise_comp_op(a.val[1], b.val[1]), - elementwise_comp_op(a.val[2], b.val[2]), - elementwise_comp_op(a.val[3], b.val[3]) + { + elementwise_comp_op(a.val[0], b.val[0]), + elementwise_comp_op(a.val[1], b.val[1]), + elementwise_comp_op(a.val[2], b.val[2]), + elementwise_comp_op(a.val[3], b.val[3]) + } }; return out; } diff --git a/src/core/NEON/kernels/NEPermuteKernel.cpp b/src/core/NEON/kernels/NEPermuteKernel.cpp index e896dd45e4..1df94aef06 100644 --- a/src/core/NEON/kernels/NEPermuteKernel.cpp +++ b/src/core/NEON/kernels/NEPermuteKernel.cpp @@ -47,39 +47,43 @@ inline bool is_permutation_supported(const PermutationVector &v) { static const std::array permutations3 = { - PermutationVector(2U, 0U, 1U), - PermutationVector(1U, 2U, 0U), - PermutationVector(0U, 1U, 2U), - PermutationVector(0U, 2U, 1U), - PermutationVector(1U, 0U, 2U), - PermutationVector(2U, 1U, 0U), + { + PermutationVector(2U, 0U, 1U), + PermutationVector(1U, 2U, 0U), + PermutationVector(0U, 1U, 2U), + PermutationVector(0U, 2U, 1U), + PermutationVector(1U, 0U, 
2U), + PermutationVector(2U, 1U, 0U), + } }; static const std::array permutations4 = { - PermutationVector(0U, 1U, 2U, 3U), - PermutationVector(1U, 0U, 2U, 3U), - PermutationVector(2U, 0U, 1U, 3U), - PermutationVector(0U, 2U, 1U, 3U), - PermutationVector(1U, 2U, 0U, 3U), - PermutationVector(2U, 1U, 0U, 3U), - PermutationVector(2U, 1U, 3U, 0U), - PermutationVector(1U, 2U, 3U, 0U), - PermutationVector(3U, 2U, 1U, 0U), - PermutationVector(2U, 3U, 1U, 0U), - PermutationVector(1U, 3U, 2U, 0U), - PermutationVector(3U, 1U, 2U, 0U), - PermutationVector(3U, 0U, 2U, 1U), - PermutationVector(0U, 3U, 2U, 1U), - PermutationVector(2U, 3U, 0U, 1U), - PermutationVector(3U, 2U, 0U, 1U), - PermutationVector(0U, 2U, 3U, 1U), - PermutationVector(2U, 0U, 3U, 1U), - PermutationVector(1U, 0U, 3U, 2U), - PermutationVector(0U, 1U, 3U, 2U), - PermutationVector(3U, 1U, 0U, 2U), - PermutationVector(1U, 3U, 0U, 2U), - PermutationVector(0U, 3U, 1U, 2U), - PermutationVector(3U, 0U, 1U, 2U) + { + PermutationVector(0U, 1U, 2U, 3U), + PermutationVector(1U, 0U, 2U, 3U), + PermutationVector(2U, 0U, 1U, 3U), + PermutationVector(0U, 2U, 1U, 3U), + PermutationVector(1U, 2U, 0U, 3U), + PermutationVector(2U, 1U, 0U, 3U), + PermutationVector(2U, 1U, 3U, 0U), + PermutationVector(1U, 2U, 3U, 0U), + PermutationVector(3U, 2U, 1U, 0U), + PermutationVector(2U, 3U, 1U, 0U), + PermutationVector(1U, 3U, 2U, 0U), + PermutationVector(3U, 1U, 2U, 0U), + PermutationVector(3U, 0U, 2U, 1U), + PermutationVector(0U, 3U, 2U, 1U), + PermutationVector(2U, 3U, 0U, 1U), + PermutationVector(3U, 2U, 0U, 1U), + PermutationVector(0U, 2U, 3U, 1U), + PermutationVector(2U, 0U, 3U, 1U), + PermutationVector(1U, 0U, 3U, 2U), + PermutationVector(0U, 1U, 3U, 2U), + PermutationVector(3U, 1U, 0U, 2U), + PermutationVector(1U, 3U, 0U, 2U), + PermutationVector(0U, 3U, 1U, 2U), + PermutationVector(3U, 0U, 1U, 2U) + } }; return (permutations3.end() != std::find(permutations3.begin(), permutations3.end(), v)) || (permutations4.end() != std::find(permutations4.begin(), permutations4.end(), v)); diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp index 476b3c8720..a765535c70 100644 --- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp +++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp @@ -57,14 +57,14 @@ uint32x4x4_t calculate_index(uint32_t idx, float32x4_t a, float32x4_t b, uint32x { vec_idx = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{}); } - uint32x4x4_t res = { wrapper::vbsl(mask, vec_idx, c.val[0]), 0, 0, 0 }; + uint32x4x4_t res = { { wrapper::vbsl(mask, vec_idx, c.val[0]), 0, 0, 0 } }; return res; } uint32x4x4_t calculate_index(uint32_t idx, uint8x16_t a, uint8x16_t b, uint32x4x4_t c, ReductionOperation op, int axis) { - uint32x4x4_t mask{ 0 }; + uint32x4x4_t mask{ { 0 } }; uint8x16_t mask_u8{ 0 }; if(op == ReductionOperation::ARG_IDX_MIN) { @@ -94,11 +94,15 @@ uint32x4x4_t calculate_index(uint32_t idx, uint8x16_t a, uint8x16_t b, uint32x4x vec_idx.val[2] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{}); vec_idx.val[3] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{}); } - uint32x4x4_t res = { vbslq_u32(mask.val[0], vec_idx.val[0], c.val[0]), - vbslq_u32(mask.val[1], vec_idx.val[1], c.val[1]), - vbslq_u32(mask.val[2], vec_idx.val[2], c.val[2]), - vbslq_u32(mask.val[3], vec_idx.val[3], c.val[3]) - }; + uint32x4x4_t res = + { + { + vbslq_u32(mask.val[0], vec_idx.val[0], c.val[0]), + vbslq_u32(mask.val[1], vec_idx.val[1], c.val[1]), + vbslq_u32(mask.val[2], 
vec_idx.val[2], c.val[2]), + vbslq_u32(mask.val[3], vec_idx.val[3], c.val[3]) + } + }; return res; } @@ -133,7 +137,7 @@ uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, float32x4_t vec_res_va uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, uint8x16_t vec_res_value, ReductionOperation op) { - uint32x4x4_t res_idx_mask{ 0 }; + uint32x4x4_t res_idx_mask{ { 0 } }; uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF); uint8x16_t mask_u8{ 0 }; if(op == ReductionOperation::ARG_IDX_MIN) @@ -367,7 +371,7 @@ struct RedOpX init_res_value = static_cast(1.f); } auto vec_res_value = wrapper::vdup_n(init_res_value, ExactTagType{}); - uint32x4x4_t vec_res_idx{ 0 }; + uint32x4x4_t vec_res_idx{ { 0 } }; execute_window_loop(in_slice, [&](const Coordinates & id) { @@ -473,7 +477,7 @@ struct RedOpX_qasymm8 vec_res_value = wrapper::vdup_n(*input.ptr(), wrapper::traits::vector_128_tag{}); } - uint32x4x4_t vec_res_idx{ 0 }; + uint32x4x4_t vec_res_idx{ { 0 } }; execute_window_loop(in_slice, [&](const Coordinates & id) { const auto vec_elements = wrapper::vloadq(input.ptr()); @@ -612,7 +616,7 @@ struct RedOpYZW { vec_res_value = wrapper::vdup_n(static_cast(0.f), ExactTagType{}); } - uint32x4x4_t vec_res_idx{ 0 }; + uint32x4x4_t vec_res_idx{ { 0 } }; for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim) { @@ -691,7 +695,7 @@ struct RedOpYZW_qasymm8 execute_window_loop(in_slice, [&](const Coordinates & id) { - uint32x4x4_t vec_res_idx{ 0 }; + uint32x4x4_t vec_res_idx{ { 0 } }; auto vec_res_value1 = vdupq_n_u32(0); auto vec_res_value2 = vdupq_n_u32(0); auto vec_res_value3 = vdupq_n_u32(0); diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp index 34a7294513..79e619cfd6 100644 --- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp +++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp @@ -173,14 +173,18 @@ void retrieve_all_priorbox(const ITensor *input_priorbox, { for(int i = 0; i < num_priors; ++i) { - all_prior_bboxes[i] = { *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4))), - *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 1))), - *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 2))), - *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 3))) - }; + all_prior_bboxes[i] = + { + { + *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4))), + *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 1))), + *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 2))), + *reinterpret_cast(input_priorbox->ptr_to_element(Coordinates(i * 4 + 3))) + } + }; } - std::array var({ 0, 0, 0, 0 }); + std::array var({ { 0, 0, 0, 0 } }); for(int i = 0; i < num_priors; ++i) { for(int j = 0; j < 4; ++j) @@ -325,16 +329,20 @@ void ApplyNMSFast(const std::vector &bboxes, if(keep) { // Compute the jaccard (intersection over union IoU) overlap between two bboxes. 
- NormalizedBBox intersect_bbox = std::array({ 0, 0, 0, 0 }); + NormalizedBBox intersect_bbox = std::array({ { 0, 0, 0, 0 } }); if(bboxes[kept_idx][0] > bboxes[idx][2] || bboxes[kept_idx][2] < bboxes[idx][0] || bboxes[kept_idx][1] > bboxes[idx][3] || bboxes[kept_idx][3] < bboxes[idx][1]) { - intersect_bbox = std::array({ 0, 0, 0, 0 }); + intersect_bbox = std::array({ { 0, 0, 0, 0 } }); } else { - intersect_bbox = std::array({ std::max(bboxes[idx][0], bboxes[kept_idx][0]), std::max(bboxes[idx][1], bboxes[kept_idx][1]), std::min(bboxes[idx][2], bboxes[kept_idx][2]), std::min(bboxes[idx][3], - bboxes[kept_idx][3]) - }); + intersect_bbox = std::array({ { + std::max(bboxes[idx][0], bboxes[kept_idx][0]), + std::max(bboxes[idx][1], bboxes[kept_idx][1]), + std::min(bboxes[idx][2], bboxes[kept_idx][2]), + std::min(bboxes[idx][3], bboxes[kept_idx][3]) + } + }); } float intersect_width = intersect_bbox[2] - intersect_bbox[0]; @@ -434,7 +442,7 @@ void extract_bounding_boxes_from_tensor(const ITensor *bboxes, std::vector(input.ptr()); - bboxes_vector.push_back(NormalizedBBox({ *input_ptr, *(input_ptr + 1), *(2 + input_ptr), *(3 + input_ptr) })); + bboxes_vector.push_back(NormalizedBBox({ { *input_ptr, *(input_ptr + 1), *(2 + input_ptr), *(3 + input_ptr) } })); }; execute_window_loop(input_win, f, input); } diff --git a/tests/datasets/PriorBoxLayerDataset.h b/tests/datasets/PriorBoxLayerDataset.h index c63e941171..a2392dbafc 100644 --- a/tests/datasets/PriorBoxLayerDataset.h +++ b/tests/datasets/PriorBoxLayerDataset.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -109,7 +109,7 @@ public: std::vector var = { 0.1, 0.1, 0.2, 0.2 }; std::vector max_val = { 60.f }; std::vector aspect_ratio = { 2.f }; - std::array steps = { 8.f, 8.f }; + std::array steps = { { 8.f, 8.f } }; add_config(TensorShape(4U, 4U), PriorBoxLayerInfo(min_val, var, 0.5f, true, false, max_val, aspect_ratio, Coordinates2D{ 8, 8 }, steps)); } }; @@ -123,7 +123,7 @@ public: std::vector var = { 0.1, 0.1, 0.2, 0.2 }; std::vector max_val = { 60.f }; std::vector aspect_ratio = { 2.f }; - std::array steps = { 8.f, 8.f }; + std::array steps = { { 8.f, 8.f } }; add_config(TensorShape(150U, 245U, 4U, 12U), PriorBoxLayerInfo(min_val, var, 0.5f, true, false, max_val, aspect_ratio, Coordinates2D{ 8, 8 }, steps)); } }; diff --git a/tests/validation/CL/BoundingBoxTransform.cpp b/tests/validation/CL/BoundingBoxTransform.cpp index c5856cae3d..b6334b5868 100644 --- a/tests/validation/CL/BoundingBoxTransform.cpp +++ b/tests/validation/CL/BoundingBoxTransform.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,12 +46,14 @@ AbsoluteTolerance absolute_tolerance_f32(0.001f); RelativeTolerance relative_tolerance_f16(half(0.2)); AbsoluteTolerance absolute_tolerance_f16(half(0.02f)); +// *INDENT-OFF* +// clang-format off const auto BboxInfoDataset = framework::dataset::make("BboxInfo", { BoundingBoxTransformInfo(20U, 20U, 2U, true), BoundingBoxTransformInfo(128U, 128U, 4U, true), BoundingBoxTransformInfo(800U, 600U, 1U, false), - BoundingBoxTransformInfo(800U, 600U, 2U, true, { 1.0, 0.5, 1.5, 2.0 }), - BoundingBoxTransformInfo(800U, 600U, 4U, false, { 1.0, 0.5, 1.5, 2.0 }), - BoundingBoxTransformInfo(800U, 600U, 4U, false, { 1.0, 0.5, 1.5, 2.0 }, true) + BoundingBoxTransformInfo(800U, 600U, 2U, true, { { 1.0, 0.5, 1.5, 2.0 } }), + BoundingBoxTransformInfo(800U, 600U, 4U, false, { { 1.0, 0.5, 1.5, 2.0 } }), + BoundingBoxTransformInfo(800U, 600U, 4U, false, { { 1.0, 0.5, 1.5, 2.0 } }, true) }); const auto DeltaDataset = framework::dataset::make("DeltasShape", { TensorShape(36U, 1U), @@ -62,7 +64,8 @@ const auto DeltaDataset = framework::dataset::make("DeltasShape", { TensorShape( TensorShape(40U, 100U), TensorShape(40U, 200U) }); - +// clang-format on +// *INDENT-ON* } // namespace TEST_SUITE(CL) diff --git a/tests/validation/reference/PadLayer.cpp b/tests/validation/reference/PadLayer.cpp index 0a3b38d697..b9a93ddaff 100644 --- a/tests/validation/reference/PadLayer.cpp +++ b/tests/validation/reference/PadLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -65,8 +65,8 @@ SimpleTensor pad_layer(const SimpleTensor &src, const PaddingList &padding const size_t m = coord[4]; const size_t n = coord[5]; - std::array dims = { 0, 1, 2, 3, 4, 5 }; - std::array coords = { i, j, k, l, m, n }; + std::array dims = { { 0, 1, 2, 3, 4, 5 } }; + std::array coords = { { i, j, k, l, m, n } }; auto is_padding_area = [&](size_t i) { return (coords[i] < paddings_extended[i].first || coords[i] > orig_shape[i] + paddings_extended[i].first - 1); -- cgit v1.2.1
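
Background on the fix, as a minimal standalone sketch (illustrative only, not code taken from the Compute Library): std::array<T, N> is an aggregate that wraps a raw array member, and the NEON multi-vector types touched above (int32x4x4_t, uint32x4x4_t, float32x4x4_t) are likewise structs holding a val[] array. Initializing either with a single brace pair relies on brace elision, which stricter or older toolchains such as the compiler shipped with Android NDK r16b flag via -Wmissing-braces (fatal under -Werror). Writing the inner brace pair explicitly, as every hunk in this patch does, initializes the nested array member directly and is accepted everywhere. The struct and values below are hypothetical, chosen only to mirror the pattern:

    // Minimal illustration of the double-brace initialization pattern
    // (assumption: standalone example, not part of the library sources).
    #include <array>
    #include <cstdio>

    // Plain struct wrapping an array, mirroring the layout of NEON multi-vector
    // types such as float32x4x4_t, which contain a val[4] member.
    struct Vec4x4
    {
        float val[4];
    };

    int main()
    {
        std::array<float, 2> single = { 1.f, 2.f };     // relies on brace elision; some
                                                        // compilers warn with -Wmissing-braces
        std::array<float, 2> nested = { { 1.f, 2.f } }; // inner braces initialize the wrapped
                                                        // array explicitly
        Vec4x4 v = { { 1.f, 2.f, 3.f, 4.f } };          // same nesting for the struct above

        std::printf("%f %f %f %f\n", single[0], nested[1], v.val[2], v.val[3]);
        return 0;
    }

Both forms are valid C++11; the double-brace form simply spells out the nested aggregate and so avoids the warning, and therefore the -Werror build break, on the affected front ends.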