author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-07-10 19:23:02 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-07-11 09:54:29 +0000
commit    ddec4d68b287f992df2493de819c908f79d2f443
tree      14c848fc88685e54a550d67918c23ececb33d6cc
parent    c878f1fdcc35c1d346fad6fbe975a55928f73187
COMPMID-2458: Initialize uninitialized variables
Change-Id: I18c39a7708a68861764b548c8d2bea3100be3612
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1511
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h | 19
-rw-r--r--  tests/validation/reference/DepthwiseConvolutionLayer.cpp     | 10
2 files changed, 11 insertions(+), 18 deletions(-)
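The substantive fix is in DepthwiseConvolutionLayer.cpp: output_multiplier and output_shift were declared without an initial value and then passed by pointer into arm_compute::quantization::calculate_quantized_multiplier_less_than_one, so any path on which the callee does not write them would leave the caller reading indeterminate values. Below is a minimal, self-contained C++ sketch of that out-parameter pattern; decompose_multiplier is a hypothetical stand-in for the library routine (not the arm_compute implementation) and the scale values are made up, the point being only that zero-initialising the out-parameters at the call site, as the patch does, makes them deterministic on every path.

#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for a quantized-multiplier decomposition routine:
// splits a real multiplier in (0, 1) into a fixed-point multiplier and a right shift.
// It deliberately bails out early for out-of-range input, mimicking an error path
// on which the out-parameters are never written.
void decompose_multiplier(float multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    if(multiplier <= 0.f || multiplier >= 1.f)
    {
        return; // out-parameters untouched on this path
    }
    int          exponent = 0;
    const double fraction = std::frexp(static_cast<double>(multiplier), &exponent); // multiplier = fraction * 2^exponent
    *right_shift          = -exponent;
    *quantized_multiplier = static_cast<int32_t>(std::lround(fraction * (1ll << 31)));
}

int main()
{
    // Before the patch these were declared uninitialized; reading them after a call
    // that skipped the write would be undefined behaviour. Zero-initialising them is
    // exactly what the patch adds.
    int32_t output_multiplier = 0;
    int     output_shift      = 0;

    const float input_scale   = 0.5f;  // made-up scales, for illustration only
    const float weights_scale = 0.25f;
    const float output_scale  = 0.75f;
    const float multiplier    = input_scale * weights_scale / output_scale;

    decompose_multiplier(multiplier, &output_multiplier, &output_shift);
    std::cout << "multiplier=" << output_multiplier << " shift=" << output_shift << "\n";
    return 0;
}

In the fixture header, the same patch also drops the stored _data_type and quantization-info members (the values are already threaded through as function parameters) and switches the QuantizationInfo and ActivationLayerInfo parameters to const references, a cleanup rather than an initialization fix.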
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 04c073b521..b01e1760aa 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -59,10 +59,6 @@ public:
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType data_type,
QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, DataLayout data_layout, ActivationLayerInfo act_info)
{
- _input_quantization_info = input_quantization_info;
- _output_quantization_info = output_quantization_info;
-
- _data_type = data_type;
const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
TensorShape weights_shape(kernel_size.width, kernel_size.height);
@@ -113,8 +109,8 @@ protected:
TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape biases_shape, TensorShape output_shape, PadStrideInfo &pad_stride_info, Size2D dilation,
unsigned int depth_multiplier,
const DataType data_type, const DataType bias_data_type,
- const QuantizationInfo input_quantization_info, const QuantizationInfo output_quantization_info,
- const DataLayout data_layout, ActivationLayerInfo act_info)
+ const QuantizationInfo &input_quantization_info, const QuantizationInfo &output_quantization_info,
+ const DataLayout data_layout, const ActivationLayerInfo &act_info)
{
if(data_layout == DataLayout::NHWC)
{
@@ -164,8 +160,8 @@ protected:
const PadStrideInfo &pad_stride_info,
const Size2D &dilation, unsigned int depth_multiplier,
const DataType data_type, const DataType bias_data_type,
- const QuantizationInfo input_quantization_info, const QuantizationInfo output_quantization_info,
- ActivationLayerInfo act_info)
+ const QuantizationInfo &input_quantization_info, const QuantizationInfo &output_quantization_info,
+ const ActivationLayerInfo &act_info)
{
SimpleTensor<T> src{ in_shape, data_type, 1, input_quantization_info };
SimpleTensor<T> weights{ weights_shape, data_type, 1, input_quantization_info };
@@ -179,11 +175,8 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(depth_out, act_info) : depth_out;
}
- TensorType _target{};
- SimpleTensor<T> _reference{};
- DataType _data_type{};
- QuantizationInfo _input_quantization_info{};
- QuantizationInfo _output_quantization_info{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index 2192d681b6..b1d2b923f7 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -137,12 +137,12 @@ SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, co
const float input_scale = src.quantization_info().uniform().scale;
const int weights_offset = -weights.quantization_info().uniform().offset;
const float weights_scale = weights.quantization_info().uniform().scale;
- const int output_offset = dst.quantization_info().uniform().offset;
- const float output_scale = dst.quantization_info().uniform().scale;
+ const int output_offset = dst_qinfo.uniform().offset;
+ const float output_scale = dst_qinfo.uniform().scale;
- int output_multiplier;
- int output_shift;
- const float multiplier = input_scale * weights_scale / output_scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ const float multiplier = input_scale * weights_scale / output_scale;
arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
// Compute reference