author    Michalis Spyrou <michalis.spyrou@arm.com>  2019-06-06 13:47:38 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>  2019-06-07 10:07:21 +0000
commit    6260e194b34842ac6d932dd5c96842c0d1214d70 (patch)
tree      654c6a24dd720d11f26be275d1c3527e8812614a
parent    d69b3b2db4b359d4e68cf786a294fa1d7d4db2d0 (diff)
download  ComputeLibrary-6260e194b34842ac6d932dd5c96842c0d1214d70.tar.gz
COMPMID-2394: (Nightly) Clang-tidy errors
Change-Id: Ie7ccf4e62cccca6057c0bc4521496267a05e4459
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1304
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
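
The patch below mostly addresses clang-tidy's performance-unnecessary-value-param findings in two ways: parameters that are stored into a member stay pass-by-value but are std::move'd into place, while parameters that are only read become const references. It also reorders one include and adds NOLINT markers where a test-reference assignment trips a check. The following sketch is illustrative only and is not part of the patch; QuantizationInfo is a simplified stand-in for the real arm_compute type and NodeExample is a hypothetical class used purely to show the two patterns.

    // Illustrative sketch, not from the patch.
    #include <utility>

    struct QuantizationInfo { float scale = 1.f; int offset = 0; };

    class NodeExample
    {
    public:
        // Sunk parameter: keep pass-by-value and std::move it into the member,
        // so a caller passing a temporary pays for at most one move, not a copy.
        explicit NodeExample(QuantizationInfo out_quant_info)
            : _out_quant_info(std::move(out_quant_info))
        {
        }

        // Read-only parameter: pass by const reference instead of by value,
        // the usual fix suggested by performance-unnecessary-value-param.
        static float scaled(float v, const QuantizationInfo &qinfo = QuantizationInfo())
        {
            return v * qinfo.scale;
        }

    private:
        QuantizationInfo _out_quant_info;
    };
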
-rw-r--r--  arm_compute/graph/nodes/FullyConnectedLayerNode.h           |  6
-rw-r--r--  examples/graph_mobilenet.cpp                                |  4
-rw-r--r--  src/graph/nodes/ActivationLayerNode.cpp                     |  2
-rw-r--r--  src/graph/nodes/ConvolutionLayerNode.cpp                    |  2
-rw-r--r--  src/graph/nodes/DepthwiseConvolutionLayerNode.cpp           |  2
-rw-r--r--  src/graph/nodes/FullyConnectedLayer.cpp                     |  8
-rw-r--r--  src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp  |  2
-rw-r--r--  src/graph/nodes/QuantizationLayerNode.cpp                   |  2
-rw-r--r--  src/runtime/NEON/functions/NECast.cpp                       |  2
-rw-r--r--  tests/validation/reference/ChannelExtract.cpp               |  4
-rw-r--r--  tests/validation/reference/ColorConvertHelper.h             |  8
-rw-r--r--  tests/validation/reference/PixelWiseMultiplication.cpp      | 13
-rw-r--r--  tests/validation/reference/PixelWiseMultiplication.h        |  4
13 files changed, 30 insertions, 29 deletions
diff --git a/arm_compute/graph/nodes/FullyConnectedLayerNode.h b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
index 33f9b1eefe..80dc28443c 100644
--- a/arm_compute/graph/nodes/FullyConnectedLayerNode.h
+++ b/arm_compute/graph/nodes/FullyConnectedLayerNode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,7 +57,7 @@ public:
static TensorDescriptor compute_weights_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(),
- QuantizationInfo weights_quant_info = QuantizationInfo());
+ const QuantizationInfo &weights_quant_info = QuantizationInfo());
/** Computes fully connected layer output descriptor
*
* @warning Works for inputs with 1D batch space
@@ -70,7 +70,7 @@ public:
*/
static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
- QuantizationInfo out_quant_info = QuantizationInfo());
+ const QuantizationInfo &out_quant_info = QuantizationInfo());
/** Fully connected layer addition information
*
* @return Additional information about the fully connected layer
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index dda88ee0da..9c014e747b 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -328,14 +328,14 @@ private:
3U, 3U,
get_weights_accessor(data_path, total_path + "depthwise_weights.npy"),
get_weights_accessor(data_path, total_path + "depthwise_bias.npy"),
- dwc_pad_stride_info, 1, depth_weights_quant_info)
+ dwc_pad_stride_info, 1, std::move(depth_weights_quant_info))
.set_name(total_path + "depthwise/depthwise")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)).set_name(total_path + "depthwise/Relu6")
<< ConvolutionLayer(
1U, 1U, conv_filt,
get_weights_accessor(data_path, total_path + "pointwise_weights.npy"),
get_weights_accessor(data_path, total_path + "pointwise_bias.npy"),
- conv_pad_stride_info, 1, point_weights_quant_info)
+ conv_pad_stride_info, 1, std::move(point_weights_quant_info))
.set_name(total_path + "pointwise/Conv2D")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)).set_name(total_path + "pointwise/Relu6");
diff --git a/src/graph/nodes/ActivationLayerNode.cpp b/src/graph/nodes/ActivationLayerNode.cpp
index ada6cf981f..6c0a7ddce6 100644
--- a/src/graph/nodes/ActivationLayerNode.cpp
+++ b/src/graph/nodes/ActivationLayerNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
ActivationLayerNode::ActivationLayerNode(ActivationLayerInfo info, QuantizationInfo out_quant_info)
- : _info(info), _out_quant_info(out_quant_info)
+ : _info(info), _out_quant_info(std::move(out_quant_info))
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
diff --git a/src/graph/nodes/ConvolutionLayerNode.cpp b/src/graph/nodes/ConvolutionLayerNode.cpp
index 1c8dcaecfc..225393417d 100644
--- a/src/graph/nodes/ConvolutionLayerNode.cpp
+++ b/src/graph/nodes/ConvolutionLayerNode.cpp
@@ -37,7 +37,7 @@ ConvolutionLayerNode::ConvolutionLayerNode(PadStrideInfo info,
ConvolutionMethod method,
FastMathHint fast_math_hint,
QuantizationInfo out_quant_info)
- : _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(out_quant_info), _fused_activation()
+ : _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(std::move(out_quant_info)), _fused_activation()
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
diff --git a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
index cdd9e7b601..3e999735a4 100644
--- a/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
+++ b/src/graph/nodes/DepthwiseConvolutionLayerNode.cpp
@@ -34,7 +34,7 @@ namespace graph
{
DepthwiseConvolutionLayerNode::DepthwiseConvolutionLayerNode(PadStrideInfo info, int depth_multiplier, DepthwiseConvolutionMethod method,
QuantizationInfo out_quant_info)
- : _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _out_quant_info(out_quant_info), _fused_activation()
+ : _info(std::move(info)), _depth_multiplier(depth_multiplier), _method(method), _out_quant_info(std::move(out_quant_info)), _fused_activation()
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
diff --git a/src/graph/nodes/FullyConnectedLayer.cpp b/src/graph/nodes/FullyConnectedLayer.cpp
index 6ea0292505..80fce7b8a1 100644
--- a/src/graph/nodes/FullyConnectedLayer.cpp
+++ b/src/graph/nodes/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,7 +32,7 @@ namespace arm_compute
namespace graph
{
FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs, QuantizationInfo out_quant_info, FullyConnectedLayerInfo fc_info)
- : _num_outputs(num_outputs), _out_quant_info(out_quant_info), _info(fc_info)
+ : _num_outputs(num_outputs), _out_quant_info(std::move(out_quant_info)), _info(fc_info)
{
_input_edges.resize(3, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
@@ -41,7 +41,7 @@ FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs, Quant
TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
FullyConnectedLayerInfo fc_info,
- QuantizationInfo weights_quant_info)
+ const QuantizationInfo &weights_quant_info)
{
unsigned int num_weights = 1;
unsigned int num_dimensions = input_descriptor.shape.num_dimensions();
@@ -75,7 +75,7 @@ TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const Tenso
TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
unsigned int num_outputs,
- QuantizationInfo out_quant_info)
+ const QuantizationInfo &out_quant_info)
{
// Note: Only 1D batch space is supported at the moment
unsigned int batches = input_descriptor.shape[1];
diff --git a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
index c304a6c605..6496a71251 100644
--- a/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
+++ b/src/graph/nodes/FusedConvolutionBatchNormalizationNode.cpp
@@ -37,7 +37,7 @@ FusedConvolutionBatchNormalizationNode::FusedConvolutionBatchNormalizationNode(f
ConvolutionMethod method,
FastMathHint fast_math_hint,
QuantizationInfo out_quant_info, ActivationLayerInfo fused_activation)
- : _epsilon(epsilon), _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(out_quant_info), _fused_activation(fused_activation)
+ : _epsilon(epsilon), _info(std::move(info)), _num_groups(num_groups), _method(method), _fast_math_hint(fast_math_hint), _out_quant_info(std::move(out_quant_info)), _fused_activation(fused_activation)
{
_input_edges.resize(7, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
diff --git a/src/graph/nodes/QuantizationLayerNode.cpp b/src/graph/nodes/QuantizationLayerNode.cpp
index 850dd38d3e..009d701171 100644
--- a/src/graph/nodes/QuantizationLayerNode.cpp
+++ b/src/graph/nodes/QuantizationLayerNode.cpp
@@ -31,7 +31,7 @@ namespace arm_compute
namespace graph
{
QuantizationLayerNode::QuantizationLayerNode(QuantizationInfo out_quant_info)
- : _out_quant_info(out_quant_info)
+ : _out_quant_info(std::move(out_quant_info))
{
_input_edges.resize(1, EmptyEdgeID);
_outputs.resize(1, NullTensorID);
diff --git a/src/runtime/NEON/functions/NECast.cpp b/src/runtime/NEON/functions/NECast.cpp
index fe1e486ab1..7e3c38bfb6 100644
--- a/src/runtime/NEON/functions/NECast.cpp
+++ b/src/runtime/NEON/functions/NECast.cpp
@@ -24,8 +24,8 @@
#include "arm_compute/runtime/NEON/functions/NECast.h"
#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "arm_compute/core/TensorInfo.h"
#include "support/ToolchainSupport.h"
#include <utility>
diff --git a/tests/validation/reference/ChannelExtract.cpp b/tests/validation/reference/ChannelExtract.cpp
index 6f17fc06fe..fc7ae7d6cb 100644
--- a/tests/validation/reference/ChannelExtract.cpp
+++ b/tests/validation/reference/ChannelExtract.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,7 +61,7 @@ SimpleTensor<uint8_t> channel_extract(const TensorShape &shape, const std::vecto
const auto *src_pixel = reinterpret_cast<const T *>(src(src_coord));
auto *dst_pixel = reinterpret_cast<T *>(dst(dst_coord));
- dst_pixel[0] = src_pixel[channel_idx];
+ dst_pixel[0] = src_pixel[channel_idx]; // NOLINT
}
}
diff --git a/tests/validation/reference/ColorConvertHelper.h b/tests/validation/reference/ColorConvertHelper.h
index b2ae6f2f80..abd1f5d1fe 100644
--- a/tests/validation/reference/ColorConvertHelper.h
+++ b/tests/validation/reference/ColorConvertHelper.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -9,14 +9,14 @@
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *asymm_int_mult
+ *
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, asymm_int_multDAMAGES OR OTHER
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
@@ -70,7 +70,7 @@ inline void store_rgb_from_src(const SimpleTensor<T> src, SimpleTensor<T> &rvec,
auto *gvec_pixel = reinterpret_cast<T *>(gvec(vec_coord));
auto *bvec_pixel = reinterpret_cast<T *>(bvec(vec_coord));
- rvec_pixel[0] = src_pixel[0];
+ rvec_pixel[0] = src_pixel[0]; // NOLINT
gvec_pixel[0] = src_pixel[1];
bvec_pixel[0] = src_pixel[2];
}
diff --git a/tests/validation/reference/PixelWiseMultiplication.cpp b/tests/validation/reference/PixelWiseMultiplication.cpp
index ea058ecdba..7dc32d08a7 100644
--- a/tests/validation/reference/PixelWiseMultiplication.cpp
+++ b/tests/validation/reference/PixelWiseMultiplication.cpp
@@ -128,7 +128,8 @@ struct BroadcastUnroll<0>
} // namespace
template <typename T1, typename T2>
-SimpleTensor<T2> pixel_wise_multiplication(const SimpleTensor<T1> &src1, const SimpleTensor<T2> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout)
+SimpleTensor<T2> pixel_wise_multiplication(const SimpleTensor<T1> &src1, const SimpleTensor<T2> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
+ const QuantizationInfo &qout)
{
ARM_COMPUTE_UNUSED(qout);
@@ -150,7 +151,7 @@ SimpleTensor<T2> pixel_wise_multiplication(const SimpleTensor<T1> &src1, const S
template <>
SimpleTensor<uint8_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src1, const SimpleTensor<uint8_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy,
- QuantizationInfo qout)
+ const QuantizationInfo &qout)
{
SimpleTensor<uint8_t> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), src2.data_type(), 1, qout);
@@ -177,10 +178,10 @@ SimpleTensor<uint8_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src
}
// *INDENT-OFF*
// clang-format off
-template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout);
-template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<int16_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout);
-template SimpleTensor<float> pixel_wise_multiplication(const SimpleTensor<float> &src1, const SimpleTensor<float> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout);
-template SimpleTensor<half_float::half> pixel_wise_multiplication(const SimpleTensor<half_float::half> &src1, const SimpleTensor<half_float::half> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout);
+template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<uint8_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
+template SimpleTensor<int16_t> pixel_wise_multiplication(const SimpleTensor<int16_t> &src1, const SimpleTensor<int16_t> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
+template SimpleTensor<float> pixel_wise_multiplication(const SimpleTensor<float> &src1, const SimpleTensor<float> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
+template SimpleTensor<half_float::half> pixel_wise_multiplication(const SimpleTensor<half_float::half> &src1, const SimpleTensor<half_float::half> &src2, float scale, ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout);
// clang-format on
// *INDENT-ON*
} // namespace reference
diff --git a/tests/validation/reference/PixelWiseMultiplication.h b/tests/validation/reference/PixelWiseMultiplication.h
index 787a7b23e2..39d2bc794c 100644
--- a/tests/validation/reference/PixelWiseMultiplication.h
+++ b/tests/validation/reference/PixelWiseMultiplication.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ namespace reference
{
template <typename T1, typename T2>
SimpleTensor<T2> pixel_wise_multiplication(const SimpleTensor<T1> &src1, const SimpleTensor<T2> &src2, float scale,
- ConvertPolicy convert_policy, RoundingPolicy rounding_policy, QuantizationInfo qout = QuantizationInfo());
+ ConvertPolicy convert_policy, RoundingPolicy rounding_policy, const QuantizationInfo &qout = QuantizationInfo());
} // namespace reference
} // namespace validation
} // namespace test