author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-02-01 16:31:33 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit     ced7a8d0b4fe77d750a1e55883d5886ad9760f3b (patch)
tree       3c47818c8111f48fe6e9c7e9109c93a6e2ae9a2c
parent     1d6d211ef31698bc1864007961522dfae7bda3db (diff)
download   ComputeLibrary-ced7a8d0b4fe77d750a1e55883d5886ad9760f3b.tar.gz
COMPMID-875: Deconvolution 4x4 not working
- Enforces the use of the ConvolutionLayer function in the DeconvolutionLayer.
- Adds tests for 4x4 Deconvolution.
- Alters the ConvolutionLayer validation to support even kernels.

Change-Id: Id27e285f078e690b8dd58490dd8ea6d875b3cec6
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118632
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h    |  6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h  |  7
-rw-r--r--  src/runtime/CL/functions/CLDeconvolutionLayer.cpp          |  4
-rw-r--r--  src/runtime/NEON/functions/NEDeconvolutionLayer.cpp        | 19
-rw-r--r--  tests/datasets/ShapeDatasets.h                             |  3
-rw-r--r--  tests/validation/CL/DeconvolutionLayer.cpp                 | 17
-rw-r--r--  tests/validation/NEON/DeconvolutionLayer.cpp               | 17
-rw-r--r--  tests/validation/reference/ConvolutionLayer.cpp            | 60
-rw-r--r--  tests/validation/reference/DeconvolutionLayer.cpp          |  2
9 files changed, 91 insertions, 44 deletions
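
For context, both the CL and NEON functions implement deconvolution as a zero-insertion upsample followed by a stride-1 forward convolution (visible in the hunks below), so the final output size follows the usual transposed-convolution relation. A minimal standalone sketch of that relation (illustrative only, not the library's shape calculator; the function name is made up here):

#include <cassert>

// Commonly used transposed-convolution output size:
// out = (in - 1) * stride + kernel - 2 * pad
inline int deconv_output_dim(int in_dim, int kernel, int stride, int pad)
{
    return (in_dim - 1) * stride + kernel - 2 * pad;
}

int main()
{
    // e.g. a 5-wide input, 4x4 kernel, stride 2, no padding -> 12
    assert(deconv_output_dim(5, 4, 2, 0) == 12);
}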
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index e98cc9b3d6..2383d2aa1d 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,8 +24,8 @@
#ifndef __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_CLDECONVOLUTIONLAYER_H__
+#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
-#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
@@ -96,7 +96,7 @@ public:
private:
CLMemoryGroup _memory_group;
CLDeconvolutionLayerUpsample _scale_f;
- CLDirectConvolutionLayer _conv_f;
+ CLConvolutionLayer _conv_f;
CLTensor _scaled_output;
};
}
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 091a928db6..1b3297e8d0 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#ifndef __ARM_COMPUTE_NEDECONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_NEDECONVOLUTIONLAYER_H__
+#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "arm_compute/core/Types.h"
@@ -95,11 +96,13 @@ public:
private:
MemoryGroup _memory_group;
- NEDirectConvolutionLayer _conv_f;
+ NEDirectConvolutionLayer _direct_conv_f;
+ NEConvolutionLayer _conv_f;
Tensor _scaled_output;
ITensor *_input;
PadStrideInfo _info;
std::pair<unsigned int, unsigned int> _inner_border;
+ bool _run_direct_convolution;
};
} // arm_compute
#endif /* __ARM_COMPUTE_NEDECONVOLUTIONLAYER_H__ */
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 1c55722344..79f6d6c10e 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -79,7 +79,7 @@ Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionLayerUpsample::validate(input, &scale_out_info, BorderSize(inner_border_right, inner_border_top), info));
- ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayer::validate(&scale_out_info, weights, bias, output, conv_info));
+ // TODO (COMPMID-754): Add validation of CLConvolutionLayer when added.
return Status{};
}
diff --git a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
index 7bce8a6b7c..b293fa080a 100644
--- a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,11 +33,13 @@ using namespace arm_compute::misc::shape_calculator;
NEDeconvolutionLayer::NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
: _memory_group(std::move(memory_manager)),
+ _direct_conv_f(),
_conv_f(),
_scaled_output(),
_input(nullptr),
_info(),
- _inner_border()
+ _inner_border(),
+ _run_direct_convolution(false)
{
}
@@ -47,11 +49,12 @@ void NEDeconvolutionLayer::configure(ITensor *input, const ITensor *weights, con
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != weights->info()->dimension(1));
- ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != 1 && weights->info()->dimension(0) != 3 && weights->info()->dimension(0) != 5);
_input = input;
_info = info;
_inner_border = std::make_pair(inner_border_right, inner_border_top);
+ // FIXME: ConvolutionLayer Segfaults in GEMM assembly code for 1x1 convolutions
+ _run_direct_convolution = (weights->info()->dimension(0) == weights->info()->dimension(1)) && (weights->info()->dimension(0) == 1);
const unsigned int stride_x = info.stride().first;
const unsigned int stride_y = info.stride().second;
@@ -75,7 +78,9 @@ void NEDeconvolutionLayer::configure(ITensor *input, const ITensor *weights, con
// setup the function to convolve the upscaled output
const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
- _conv_f.configure(&_scaled_output, weights, bias, output, conv_info);
+ (_run_direct_convolution) ? _direct_conv_f.configure(&_scaled_output, weights, bias, output, conv_info) : _conv_f.configure(&_scaled_output, weights, bias, output, conv_info);
+
+ // Allocate auxiliary tensors
_scaled_output.allocator()->allocate();
}
@@ -92,7 +97,7 @@ void NEDeconvolutionLayer::run()
const int stride_x = _info.stride().first;
const int stride_y = _info.stride().second;
- std::fill_n(reinterpret_cast<float *>(_scaled_output.buffer()), _scaled_output.info()->tensor_shape().total_size(), 0.f);
+ std::fill_n(_scaled_output.buffer(), _scaled_output.info()->total_size(), 0);
// scaled_output is the input for the forward convolution. We copy the input elements to scaled_output
// and insert rows and columns with zeroes depending on the stride values.
@@ -113,6 +118,8 @@ void NEDeconvolutionLayer::run()
}
}
- _conv_f.run();
+ // Run convolution layer
+ (_run_direct_convolution) ? _direct_conv_f.run() : _conv_f.run();
+
_memory_group.release();
}
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 9114f514aa..a5f0863746 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -270,6 +270,7 @@ public:
ShapeDataset("Shape1",
{
TensorShape{ 1921U, 1U, 2U },
+ TensorShape{ 1921U, 1U, 2U },
TensorShape{ 641U, 1U, 2U, 3U },
TensorShape{ 1U, 127U, 25U },
TensorShape{ 799U, 595U, 1U, 4U }
@@ -345,7 +346,7 @@ public:
SmallDeconvolutionShapes()
: ShapeDataset("InputShape",
{
- TensorShape{ 4U, 3U, 3U, 2U },
+ TensorShape{ 5U, 4U, 3U, 2U },
TensorShape{ 5U, 5U, 3U },
TensorShape{ 11U, 13U, 4U, 3U }
})
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 59e85537e5..58a20268ef 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,6 +45,9 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
+const auto data4x4 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 3)
+ * framework::dataset::make("PadY", 0, 3) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
+
const auto data3x3 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 2)
* framework::dataset::make("PadY", 0, 2) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
@@ -157,6 +160,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
// *INDENT-ON*
template <typename T>
+using CLDeconvolutionLayerFixture4x4 = DeconvolutionValidationFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 4, 4>;
+
+template <typename T>
using CLDeconvolutionLayerFixture3x3 = DeconvolutionValidationFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 3, 3>;
template <typename T>
@@ -165,6 +171,15 @@ using CLDeconvolutionLayerFixture1x1 = DeconvolutionValidationFixture<CLTensor,
TEST_SUITE(Float)
TEST_SUITE(FP32)
+TEST_SUITE(W4x4)
+
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(data4x4, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END()
+
TEST_SUITE(W3x3)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(data3x3, framework::dataset::make("DataType", DataType::F32)))
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index 9573784d86..566b75a827 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,6 +44,9 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
+const auto data4x4 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 3)
+ * framework::dataset::make("PadY", 0, 3) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
+
const auto data3x3 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 2)
* framework::dataset::make("PadY", 0, 2) * framework::dataset::make("ax", 0) * framework::dataset::make("ay", 0) * framework::dataset::make("NumKernels", { 1, 3 });
@@ -56,6 +59,9 @@ TEST_SUITE(NEON)
TEST_SUITE(DeconvolutionLayer)
template <typename T>
+using NEDeconvolutionLayerFixture4x4 = DeconvolutionValidationFixture<Tensor, Accessor, NEDeconvolutionLayer, T, 4, 4>;
+
+template <typename T>
using NEDeconvolutionLayerFixture3x3 = DeconvolutionValidationFixture<Tensor, Accessor, NEDeconvolutionLayer, T, 3, 3>;
template <typename T>
@@ -64,6 +70,15 @@ using NEDeconvolutionLayerFixture1x1 = DeconvolutionValidationFixture<Tensor, Ac
TEST_SUITE(Float)
TEST_SUITE(FP32)
+TEST_SUITE(W4x4)
+
+FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture4x4<float>, framework::DatasetMode::ALL, combine(data4x4, framework::dataset::make("DataType", DataType::F32)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+TEST_SUITE_END()
+
TEST_SUITE(W3x3)
FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerFixture3x3<float>, framework::DatasetMode::ALL, combine(data3x3, framework::dataset::make("DataType", DataType::F32)))
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index 567fac0f5e..b7ed2f56c0 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,8 +58,10 @@ void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, co
const TB *b_ptr = bias.data() + b_offset;
T *out_ptr = out.data() + o_offset;
- const int half_width_weights = width_weights / 2;
- const int half_height_weights = height_weights / 2;
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
// Reset accumulator
T acc(0);
@@ -71,15 +73,15 @@ void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, co
const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
// Compute 2D convolution
- for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
{
- for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
{
// Check if the pixel is out-of-bound
if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
{
- const int idx = xk + half_width_weights;
- const int idy = yk + half_height_weights;
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
const T i_value = in_ptr[offset_slice_in + xk + yk * width_in];
const T w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
@@ -106,8 +108,10 @@ void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, co
T *out_ptr = out.data() + o_offset;
int fixed_point_position = in.fixed_point_position();
- const int half_width_weights = width_weights / 2;
- const int half_height_weights = height_weights / 2;
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
using namespace fixed_point_arithmetic;
using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
@@ -122,15 +126,15 @@ void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, co
const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
// Compute 2D convolution
- for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
{
- for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
{
// Check if the pixel is out-of-bound
if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
{
- const int idx = xk + half_width_weights;
- const int idy = yk + half_height_weights;
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
const fixed_point<promoted_type> i_value(in_ptr[offset_slice_in + xk + yk * width_in], fixed_point_position, true);
const fixed_point<promoted_type> w_value(w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights], fixed_point_position, true);
@@ -173,8 +177,10 @@ void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t>
const float multiplier = input_scale * weights_scale / output_scale;
arm_compute::quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
- const int half_width_weights = width_weights / 2;
- const int half_height_weights = height_weights / 2;
+ const int half_width_weights_start = width_weights / 2;
+ const int half_width_weights_end = ((width_weights % 2) == 0) ? (half_width_weights_start - 1) : half_width_weights_start;
+ const int half_height_weights_start = height_weights / 2;
+ const int half_height_weights_end = ((height_weights % 2) == 0) ? (half_height_weights_start - 1) : half_height_weights_start;
// Reset accumulator
int32_t acc(0);
@@ -186,15 +192,15 @@ void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t>
const int offset_slice_in = xi + yi * width_in + ifm * width_in * height_in;
// Compute 2D convolution
- for(int yk = -half_height_weights; yk <= half_height_weights; ++yk)
+ for(int yk = -half_height_weights_start; yk <= half_height_weights_end; ++yk)
{
- for(int xk = -half_width_weights; xk <= half_width_weights; ++xk)
+ for(int xk = -half_width_weights_start; xk <= half_width_weights_end; ++xk)
{
// Check if the pixel is out-of-bound
if(is_valid_pixel(xi + xk, 0, width_in) && is_valid_pixel(yi + yk, 0, height_in))
{
- const int idx = xk + half_width_weights;
- const int idy = yk + half_height_weights;
+ const int idx = xk + half_width_weights_start;
+ const int idy = yk + half_height_weights_start;
const uint8_t i_value = in_ptr[offset_slice_in + xk + yk * width_in];
const uint8_t w_value = w_ptr[idx + idy * width_weights + ifm * width_weights * height_weights];
@@ -233,17 +239,17 @@ SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor
const int width_weights = weights.shape().x();
const int height_weights = weights.shape().y();
const int depth_weights = weights.shape().z();
- const int pad_left = std::min(static_cast<int>(info.pad_left()), width_weights / 2);
- const int pad_top = std::min(static_cast<int>(info.pad_top()), height_weights / 2);
- const int pad_right = std::min(static_cast<int>(info.pad_right()), width_weights / 2);
- const int pad_bottom = std::min(static_cast<int>(info.pad_bottom()), height_weights / 2);
+ const int pad_left = info.pad_left();
+ const int pad_top = info.pad_top();
+ const int stride_xi = info.stride().first;
+ const int stride_yi = info.stride().second;
+
+ auto output_wh = scaled_dimensions(width_in, height_in, width_weights, height_weights, info);
const int start_xi = width_weights / 2 - pad_left;
const int start_yi = height_weights / 2 - pad_top;
- const int end_xi = width_in + pad_left - width_weights / 2 + pad_right - width_weights / 2;
- const int end_yi = height_in + pad_top - height_weights / 2 + pad_bottom - height_weights / 2;
- const int stride_xi = info.stride().first;
- const int stride_yi = info.stride().second;
+ const int end_xi = output_wh.first * stride_xi;
+ const int end_yi = output_wh.second * stride_yi;
const int num_batches = src.shape().total_size() / (width_in * height_in * depth_in);
for(int r = 0; r < num_batches; ++r)
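
The essential change for even kernels in the reference above: the old symmetric window [-w/2, +w/2] visits w + 1 taps when w is even, so the upper bound is now pulled in by one for even kernel widths and heights. A standalone check of the arithmetic (not library code):

#include <cassert>

int main()
{
    for(int w : { 1, 3, 4, 5 })
    {
        const int start = w / 2;                             // taps left of / above the anchor
        const int end   = (w % 2 == 0) ? start - 1 : start;  // taps right of / below the anchor
        assert(end - (-start) + 1 == w);                     // the window spans exactly w taps
    }
}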
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index 0cf1087346..617f6908e4 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*