author    Usama Arif <usama.arif@arm.com>        2019-03-14 15:36:54 +0000
committer Pablo Marquez <pablo.tello@arm.com>    2019-03-27 09:22:04 +0000
commit    8cf8c1123440c2002ee108d1949529bf21eac944 (patch)
tree      cc61d9ed5ee805c4356b8497b2e81f67b194b36a
parent    adc2186c06ca27f368dfe6ceadce449551259efc (diff)
download  ComputeLibrary-8cf8c1123440c2002ee108d1949529bf21eac944.tar.gz
COMPMID-1944 Add support for "reflect" padding mode in NEPad
Change-Id: I56c42524497d37d44708648571fa211ac1afbd98
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/885
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
 arm_compute/core/Helpers.h                      |   9
 arm_compute/core/Helpers.inl                    |  31
 arm_compute/runtime/CL/functions/CLPadLayer.h   |  12
 arm_compute/runtime/NEON/functions/NEPadLayer.h |  44
 src/runtime/CL/functions/CLPadLayer.cpp         |   6
 src/runtime/NEON/functions/NEPadLayer.cpp       | 245
 tests/validation/CL/PadLayer.cpp                |  20
 tests/validation/NEON/PadLayer.cpp              | 148
 tests/validation/fixtures/PadLayerFixture.h     |  48
 utils/TypePrinter.h                             |  40
 10 files changed, 487 insertions(+), 116 deletions(-)
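
For illustration only, a minimal sketch (not part of the patch) of how the extended NEON interface could be exercised once this change is applied; the tensor shape, padding values and function name below are assumptions made for the example:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// Hypothetical usage: pad a 4x3 F32 tensor by one element on each side of the
// first two dimensions, mirroring the input without repeating the border
// values (REFLECT); SYMMETRIC would repeat them instead.
void pad_reflect_example()
{
    Tensor src{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(4U, 3U), 1, DataType::F32));

    NEPadLayer pad{};
    // The output shape is auto-initialised by configure() from the padded input shape.
    pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 1, 1 } }, PixelValue(), PaddingMode::REFLECT);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src ...
    pad.run();
}
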
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index c7c7110ef5..235657a38a 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -707,6 +707,15 @@ inline int coords2index(const TensorShape &shape, const Coordinates &coord);
*/
inline size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension);
+/** Get the DataLayoutDimension for a given index and layout.
+ *
+ * @param[in] data_layout The data layout.
+ * @param[in] index The data layout index.
+ *
+ * @return The data layout dimension that corresponds to the given index.
+ */
+inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout data_layout, const size_t index);
+
/** Calculate the normalization dimension index for a given normalization type
*
* @param[in] layout Data layout of the input and output tensor
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index c0e4ab8d7d..aeb290b23e 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -351,4 +351,33 @@ inline size_t get_data_layout_dimension_index(const DataLayout data_layout, cons
break;
}
}
+
+inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout data_layout, const size_t index)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(data_layout == DataLayout::UNKNOWN, "Cannot retrieve the dimension index for an unknown layout!");
+
+ /* Return the dimension based on the given index and data layout:
+ *         index: 3 2 1 0
+ *   NCHW layout: N C H W
+ *   NHWC layout: N H W C
+ */
+ switch(index)
+ {
+ case 0:
+ return (data_layout == DataLayout::NCHW) ? DataLayoutDimension::WIDTH : DataLayoutDimension::CHANNEL;
+ break;
+ case 1:
+ return (data_layout == DataLayout::NCHW) ? DataLayoutDimension::HEIGHT : DataLayoutDimension::WIDTH;
+ break;
+ case 2:
+ return (data_layout == DataLayout::NCHW) ? DataLayoutDimension::CHANNEL : DataLayoutDimension::HEIGHT;
+ break;
+ case 3:
+ return DataLayoutDimension::BATCHES;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Index value not supported!");
+ break;
+ }
+}
} // namespace arm_compute
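
As an illustrative check of the new helper above (not part of the patch), the index-to-dimension mapping it implements resolves as follows for the two supported layouts:

#include <cassert>

#include "arm_compute/core/Helpers.h"

using namespace arm_compute;

// Sketch only: asserts restate the switch in get_index_data_layout_dimension().
void check_index_to_dimension_mapping()
{
    // NCHW: index 0 -> WIDTH, 1 -> HEIGHT, 2 -> CHANNEL, 3 -> BATCHES
    assert(get_index_data_layout_dimension(DataLayout::NCHW, 0) == DataLayoutDimension::WIDTH);
    assert(get_index_data_layout_dimension(DataLayout::NCHW, 2) == DataLayoutDimension::CHANNEL);
    // NHWC: index 0 -> CHANNEL, 1 -> WIDTH, 2 -> HEIGHT, 3 -> BATCHES
    assert(get_index_data_layout_dimension(DataLayout::NHWC, 0) == DataLayoutDimension::CHANNEL);
    assert(get_index_data_layout_dimension(DataLayout::NHWC, 2) == DataLayoutDimension::HEIGHT);
}
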
diff --git a/arm_compute/runtime/CL/functions/CLPadLayer.h b/arm_compute/runtime/CL/functions/CLPadLayer.h
index 1ecf82fa7c..0179441af2 100644
--- a/arm_compute/runtime/CL/functions/CLPadLayer.h
+++ b/arm_compute/runtime/CL/functions/CLPadLayer.h
@@ -53,9 +53,12 @@ public:
* @param[out] output Output tensor. Data type supported: same as @p input
* @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
* specifies the front and the end padding in the i-th dimension.
- * @param[in] constant_value (Optional) Constant value to be used for the padding
+ * @param[in] constant_value (Optional) Constant value to be used for the padding.
+ * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
+ * or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT). Only CONSTANT
+ * is currently supported.
*/
- void configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue());
+ void configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), PaddingMode mode = PaddingMode::CONSTANT);
/** Static function to check if given info will lead to a valid configuration of @ref CLPadLayer.
*
@@ -64,8 +67,11 @@ public:
* @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
* specifies the front and the end padding in the i-th dimension.
* @param[in] constant_value (Optional) Constant value to be used for the padding
+ * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
+ * or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT). Only CONSTANT
+ * is currently supported.
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value = PixelValue());
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value = PixelValue(), PaddingMode mode = PaddingMode::CONSTANT);
// Inherited methods overridden:
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEPadLayer.h b/arm_compute/runtime/NEON/functions/NEPadLayer.h
index 3a0863802a..78dbc1f1f9 100644
--- a/arm_compute/runtime/NEON/functions/NEPadLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPadLayer.h
@@ -25,17 +25,17 @@
#define __ARM_COMPUTE_NEPADLAYER_H__
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEStridedSlice.h"
#include "arm_compute/runtime/SubTensor.h"
#include "arm_compute/core/NEON/kernels/NECopyKernel.h"
#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
namespace arm_compute
{
-// Forward declarations
-class ITensor;
-
/** Basic function to pad a tensor. This function calls the following NEON kernels:
*
* -# @ref NEMemsetKernel
@@ -53,8 +53,10 @@ public:
* @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
* specifies the front and the end padding in the i-th dimension.
* @param[in] constant_value (Optional) Constant value to be used for the padding
+ * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
+ * or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT).
*/
- void configure(ITensor *input, ITensor *output, const PaddingList &padding, PixelValue constant_value = PixelValue());
+ void configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value = PixelValue(), const PaddingMode mode = PaddingMode::CONSTANT);
/** Static function to check if given info will lead to a valid configuration of @ref NEPadLayer.
*
* @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
@@ -62,18 +64,44 @@ public:
* @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
* specifies the front and the end padding in the i-th dimension.
* @param[in] constant_value (Optional) Constant value to be used for the padding
+ * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT,
+ * or reflect the input, either including the border values (SYMMETRIC) or not (REFLECT).
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value = PixelValue());
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value = PixelValue(), const PaddingMode mode = PaddingMode::CONSTANT);
// Inherited methods overridden:
void run() override;
private:
- NEMemsetKernel _memset_kernel;
- NECopyKernel _copy_kernel;
- SubTensor _output_subtensor;
+ /** Configure kernels for when constant padding is used.
+ *
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Output tensor. Data type supported: same as @p input
+ * @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i]
+ * specifies the front and the end padding in the i-th dimension.
+ * @param[in] constant_value Constant value to be used for the padding
+ */
+ void configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value);
+ /** Configure functions for when reflect or symmetric padding is used.
+ *
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Output tensor. Data type supported: same as @p input
+ */
+ void configure_reflect_symmetric_mode(ITensor *input, ITensor *output);
+
+private:
+ NECopyKernel _copy_kernel;
+ PaddingMode _mode;
+ PaddingList _padding;
+ NEMemsetKernel _memset_kernel;
+ uint32_t _num_dimensions;
+ std::unique_ptr<NEStridedSlice[]> _slice_functions;
+ std::unique_ptr<NEConcatenateLayer[]> _concat_functions;
+ std::unique_ptr<Tensor[]> _slice_results;
+ std::unique_ptr<Tensor[]> _concat_results;
+ SubTensor _output_subtensor;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEPADLAYER_H__ */
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index 3aa1b1e1a0..fac2364ae5 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -34,8 +34,9 @@ CLPadLayer::CLPadLayer()
{
}
-void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value)
+void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
{
+ ARM_COMPUTE_UNUSED(mode);
// Copy the input to the output
_copy_kernel.configure(input, output, padding);
@@ -46,10 +47,11 @@ void CLPadLayer::configure(ICLTensor *input, ICLTensor *output, const PaddingLis
_fillborder_kernel.configure(input, input->info()->padding(), BorderMode::CONSTANT, constant_value);
}
-Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value)
+Status CLPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
{
ARM_COMPUTE_RETURN_ON_ERROR(CLMemsetKernel::validate(input, constant_value));
ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(input, output, padding));
+ ARM_COMPUTE_RETURN_ERROR_ON(mode != PaddingMode::CONSTANT);
return Status{};
}
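
A short sketch (not part of the patch) of what the new CL-side guard means in practice: only CONSTANT mode passes CLPadLayer::validate(), so callers can probe support before configuring. The shapes and helper name below are made up for the example:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"

using namespace arm_compute;

// validate() is static, so no CL context is needed to probe support.
bool cl_reflect_padding_supported()
{
    const TensorInfo  src(TensorShape(8U, 8U), 1, DataType::F32);
    const TensorInfo  dst(TensorShape(10U, 10U), 1, DataType::F32);
    const PaddingList padding{ { 1, 1 }, { 1, 1 } };

    // Expected to fail with this patch: only CONSTANT mode is accepted on CL.
    return bool(CLPadLayer::validate(&src, &dst, padding, PixelValue(), PaddingMode::REFLECT));
}
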
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index f5c2718cec..62a7d4559b 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
@@ -61,18 +60,29 @@ Coordinates get_subtensor_coords(const PaddingList &paddings)
return coords;
}
+
+uint32_t last_padding_dimension(const PaddingList &padding)
+{
+ int last_padding_dim = padding.size() - 1;
+ for(; last_padding_dim >= 0; --last_padding_dim)
+ {
+ if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0)
+ {
+ break;
+ }
+ }
+ return static_cast<uint32_t>(last_padding_dim);
+}
} // namespace
NEPadLayer::NEPadLayer()
- : _memset_kernel(), _copy_kernel(), _output_subtensor()
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr),
+ _output_subtensor()
{
}
-void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, PixelValue constant_value)
+void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_THROW_ON_ERROR(NEPadLayer::validate(input->info(), output->info(), padding, constant_value));
-
// Auto-init
auto_init_if_empty(*output->info(), get_expected_output_tensorinfo(*input->info(), padding));
@@ -86,23 +96,230 @@ void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &p
_copy_kernel.configure(input, &_output_subtensor);
}
-Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value)
+void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
+{
+ // Reflecting can be performed by effectively unfolding the input as follows:
+ // For each dimension starting at DimX:
+ // For before and after:
+ // Use strided slice to extract and reverse the part of the
+ // input / previously produced tensor required for the padding.
+ // Concatenate the before and after padding with the input / previously
+ // produced tensor along the current dimension.
+
+ // Two strided slice functions will be required for each dimension padded as well as a
+ // concatenate function and the tensors to hold the temporary results.
+ _slice_functions = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(2 * _num_dimensions);
+ _slice_results = arm_compute::support::cpp14::make_unique<Tensor[]>(2 * _num_dimensions);
+ _concat_functions = arm_compute::support::cpp14::make_unique<NEConcatenateLayer[]>(_num_dimensions);
+ _concat_results = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_dimensions - 1);
+ Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ ITensor *prev = input;
+ for(uint32_t i = 0; i < _num_dimensions; ++i)
+ {
+ // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
+ if(i > 0)
+ {
+ strides.set(i - 1, 1);
+ }
+
+ if(_padding[i].first > 0 || _padding[i].second > 0)
+ {
+ // Set the starts, ends, and strides values for the current dimension.
+ // Due to the bit masks passed to strided slice, the values below the current dimension in
+ // starts and ends will be ignored so do not need to be modified.
+ if(_mode == PaddingMode::REFLECT)
+ {
+ starts_before.set(i, _padding[i].first);
+ ends_before.set(i, 0);
+ starts_after.set(i, input->info()->dimension(i) - 2);
+ ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
+ strides.set(i, -1);
+ }
+ else
+ {
+ starts_before.set(i, _padding[i].first - 1);
+ ends_before.set(i, -1);
+ starts_after.set(i, input->info()->dimension(i) - 1);
+ ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
+ strides.set(i, -1);
+ }
+
+ // Strided slice wraps negative indexes around to the end of the range,
+ // instead this should indicate use of the full range and so the bit mask will be modified.
+ const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
+ const int32_t end_mask_before = ends_before[i] < 0 ? ~0 : ~(1u << i);
+ const int32_t begin_mask_after = starts_after[i] < 0 ? ~0 : ~(1u << i);
+ const int32_t end_mask_after = ends_after[i] < 0 ? ~0 : ~(1u << i);
+
+ // Reflect the input values for the padding before and after the input.
+ std::vector<ITensor *> concat_vector;
+ if(_padding[i].first > 0)
+ {
+ if(i < prev->info()->num_dimensions())
+ {
+ _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
+ concat_vector.push_back(&_slice_results[2 * i]);
+ }
+ else
+ {
+ // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
+ concat_vector.push_back(prev);
+ }
+ }
+ concat_vector.push_back(prev);
+ if(_padding[i].second > 0)
+ {
+ if(i < prev->info()->num_dimensions())
+ {
+ _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
+ concat_vector.push_back(&_slice_results[2 * i + 1]);
+ }
+ else
+ {
+ // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
+ concat_vector.push_back(prev);
+ }
+ }
+ // Concatenate the padding before and after with the input.
+ ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
+ _concat_functions[i].configure(concat_vector, out, get_index_data_layout_dimension(input->info()->data_layout(), i));
+ if(i != _num_dimensions - 1)
+ {
+ _concat_results[i].allocator()->allocate();
+ }
+ prev = out;
+ }
+ _slice_results[2 * i].allocator()->allocate();
+ _slice_results[2 * i + 1].allocator()->allocate();
+ }
+}
+
+void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
+{
+ ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));
+
+ _padding = padding;
+ _mode = mode;
+
+ const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);
+
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));
+
+ // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
+ _num_dimensions = last_padding_dimension(padding) + 1;
+ if(_num_dimensions > 0)
+ {
+ switch(_mode)
+ {
+ case PaddingMode::CONSTANT:
+ {
+ configure_constant_mode(input, output, padding, constant_value);
+ break;
+ }
+ case PaddingMode::REFLECT:
+ case PaddingMode::SYMMETRIC:
+ {
+ configure_reflect_symmetric_mode(input, output);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Padding mode not supported.");
+ }
+ }
+ else
+ {
+ // Copy the input to the whole output if no padding is applied
+ _copy_kernel.configure(input, output);
+ }
+}
+
+Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
ARM_COMPUTE_UNUSED(constant_value);
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- auto output_clone = output->clone();
+ const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
- SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
- ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
+ if(output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+ switch(mode)
+ {
+ case PaddingMode::CONSTANT:
+ {
+ auto output_clone = output->clone();
+ SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
+ ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
+ break;
+ }
+ case PaddingMode::REFLECT:
+ case PaddingMode::SYMMETRIC:
+ {
+ for(uint32_t i = 0; i < padding.size(); ++i)
+ {
+ if(mode == PaddingMode::REFLECT)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
+ ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
+ ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
+ }
+ }
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Invalid mode");
+ }
+ }
return Status{};
}
void NEPadLayer::run()
{
- NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
- NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+ if(_num_dimensions > 0)
+ {
+ switch(_mode)
+ {
+ case PaddingMode::CONSTANT:
+ {
+ NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
+ NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+ break;
+ }
+ case PaddingMode::REFLECT:
+ case PaddingMode::SYMMETRIC:
+ {
+ for(uint32_t i = 0; i < _num_dimensions; ++i)
+ {
+ if(_padding[i].first > 0 || _padding[i].second > 0)
+ {
+ if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
+ {
+ _slice_functions[2 * i].run();
+ }
+ if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
+ {
+ _slice_functions[2 * i + 1].run();
+ }
+ _concat_functions[i].run();
+ }
+ }
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Padding mode not supported.");
+ }
+ }
+ else
+ {
+ NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+ }
}
} // namespace arm_compute
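
To make the slice-and-concatenate scheme above concrete, here is a hand-worked 1-D illustration (not part of the patch), following the starts/ends/strides set in configure_reflect_symmetric_mode() and the bounds enforced in validate():

// Input along dimension 0: [1 2 3 4], padding {2, 1}
//
// REFLECT (border element not repeated; validate() requires padding < dim size):
//   before: starts=2, ends=0,  stride=-1  -> [3 2]
//   after:  starts=2, ends=1,  stride=-1  -> [3]
//   concat: [3 2] + [1 2 3 4] + [3]       -> [3 2 1 2 3 4 3]
//
// SYMMETRIC (border element repeated; validate() allows padding <= dim size):
//   before: starts=1, ends=-1, stride=-1  -> [2 1]   (end mask set, slice runs to index 0)
//   after:  starts=3, ends=2,  stride=-1  -> [4]
//   concat: [2 1] + [1 2 3 4] + [4]       -> [2 1 1 2 3 4 4]
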
diff --git a/tests/validation/CL/PadLayer.cpp b/tests/validation/CL/PadLayer.cpp
index 4bbd7b8e14..9430b1212b 100644
--- a/tests/validation/CL/PadLayer.cpp
+++ b/tests/validation/CL/PadLayer.cpp
@@ -94,8 +94,9 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<float>, framework::DatasetMode::ALL,
combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
- PaddingSizesDataset))
+ combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -105,8 +106,9 @@ TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<half>, framework::DatasetMode::ALL,
combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
- PaddingSizesDataset))
+ combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -118,8 +120,9 @@ TEST_SUITE(Integer)
TEST_SUITE(S8)
FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<int8_t>, framework::DatasetMode::ALL,
combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
- PaddingSizesDataset))
+ combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -131,8 +134,9 @@ TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<uint8_t>, framework::DatasetMode::ALL,
combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
- PaddingSizesDataset))
+ combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
{
// Validate output
validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/NEON/PadLayer.cpp b/tests/validation/NEON/PadLayer.cpp
index 90d3ae98d8..5049347f27 100644
--- a/tests/validation/NEON/PadLayer.cpp
+++ b/tests/validation/NEON/PadLayer.cpp
@@ -42,12 +42,14 @@ namespace validation
{
namespace
{
-const auto PaddingSizesDataset = framework::dataset::make("PaddingSize", { PaddingList{ { 0, 0 } },
+const auto PaddingSizesDataset = framework::dataset::make("PaddingSize",
+{
+ PaddingList{ { 0, 0 } },
PaddingList{ { 1, 1 } },
PaddingList{ { 1, 1 }, { 2, 2 } },
- PaddingList{ { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
- PaddingList{ { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 2 } },
- PaddingList{ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1, 1 } }
+ PaddingList{ { 1, 1 }, { 1, 1 }, { 1, 1 } },
+ PaddingList{ { 0, 0 }, { 1, 0 }, { 0, 1 } },
+ PaddingList{ { 0, 1 }, { 1, 0 }, { 0, 1 } },
});
} // namespace
@@ -57,33 +59,62 @@ TEST_SUITE(PadLayer)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32)
- }),
- framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32)
- })),
- framework::dataset::make("PaddingSize", { PaddingList{{0, 0}},
- PaddingList{{1, 1}},
- PaddingList{{1, 1}, {2, 2}},
- PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
- PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
- PaddingList{{0,0}, {0,0}, {0,0}, {1,1}}
- })),
- framework::dataset::make("Expected", { false, false, true, true, true, true })),
- input_info, output_info, padding, expected)
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32)
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+ TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32)
+ })),
+ framework::dataset::make("PaddingSize", { PaddingList{{0, 0}},
+ PaddingList{{1, 1}},
+ PaddingList{{1, 1}, {2, 2}},
+ PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
+ PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
+ PaddingList{{0,0}, {0,0}, {0,0}, {1,1}},
+ PaddingList{{0, 0}},
+ PaddingList{{1, 1}},
+ PaddingList{{1, 1}, {2, 2}},
+ PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
+ PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
+ PaddingList{{0,0}, {0,0}, {0,0}, {1,1}}
+ })),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT,
+ PaddingMode::CONSTANT,
+ PaddingMode::CONSTANT,
+ PaddingMode::CONSTANT,
+ PaddingMode::CONSTANT,
+ PaddingMode::CONSTANT,
+ PaddingMode::REFLECT,
+ PaddingMode::REFLECT,
+ PaddingMode::REFLECT,
+ PaddingMode::REFLECT,
+ PaddingMode::REFLECT,
+ PaddingMode::SYMMETRIC })),
+ framework::dataset::make("Expected", { false, false, true, true, true, true, false, false, true, false, false, true })),
+ input_info, output_info, padding, mode, expected)
{
- Status s = NEPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding);
- ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(NEPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding, PixelValue(), mode)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -96,17 +127,17 @@ TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<float>, framework::DatasetMode::ALL,
- combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<float>, framework::DatasetMode::NIGHTLY,
- combine(
- combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::F32 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -116,17 +147,17 @@ TEST_SUITE_END() // FP32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<half>, framework::DatasetMode::ALL,
- combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<half>, framework::DatasetMode::NIGHTLY,
- combine(
- combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::F16 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -135,41 +166,20 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END() // Float
-TEST_SUITE(Integer)
-TEST_SUITE(S8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<int8_t>, framework::DatasetMode::ALL,
- combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
- PaddingSizesDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<int8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::S8 })),
- PaddingSizesDataset))
-{
- // Validate output
- validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END() // S8
-TEST_SUITE_END() // Integer
-
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<uint8_t>, framework::DatasetMode::ALL,
- combine(
- combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
{
// Validate output
validate(Accessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
- combine(
- combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
- PaddingSizesDataset))
+ combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+ PaddingSizesDataset),
+ framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
{
// Validate output
validate(Accessor(_target), _reference);
diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h
index 839313a118..3538cabfeb 100644
--- a/tests/validation/fixtures/PadLayerFixture.h
+++ b/tests/validation/fixtures/PadLayerFixture.h
@@ -45,30 +45,54 @@ class PaddingFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, const PaddingList &padding)
+ void setup(TensorShape shape, DataType data_type, const PaddingList &padding, const PaddingMode mode)
{
- _target = compute_target(shape, data_type, padding);
- _reference = compute_reference(shape, data_type, padding);
+ PaddingList clamped_padding = padding;
+ if(mode != PaddingMode::CONSTANT)
+ {
+ // Clamp padding to prevent applying more than is possible.
+ for(uint32_t i = 0; i < padding.size(); ++i)
+ {
+ if(mode == PaddingMode::REFLECT)
+ {
+ clamped_padding[i].first = std::min(static_cast<uint64_t>(padding[i].first), static_cast<uint64_t>(shape[i] - 1));
+ clamped_padding[i].second = std::min(static_cast<uint64_t>(padding[i].second), static_cast<uint64_t>(shape[i] - 1));
+ }
+ else
+ {
+ clamped_padding[i].first = std::min(static_cast<uint64_t>(padding[i].first), static_cast<uint64_t>(shape[i]));
+ clamped_padding[i].second = std::min(static_cast<uint64_t>(padding[i].second), static_cast<uint64_t>(shape[i]));
+ }
+ }
+ }
+ _target = compute_target(shape, data_type, clamped_padding, mode);
+ _reference = compute_reference(shape, data_type, clamped_padding, mode);
}
protected:
template <typename U>
- void fill(U &&tensor)
+ void fill(U &&tensor, int i)
{
- library->fill_tensor_uniform(tensor, 0);
+ library->fill_tensor_uniform(tensor, i);
}
TensorType compute_target(const TensorShape &shape,
DataType data_type,
- const PaddingList &paddings)
+ const PaddingList &paddings,
+ const PaddingMode mode)
{
// Create tensors
TensorType src = create_tensor<TensorType>(shape, data_type);
TensorType dst;
+ TensorType const_val = create_tensor<TensorType>(TensorShape(1), data_type);
+ const_val.allocator()->allocate();
+ fill(AccessorType(const_val), 1);
+ T const_value = *static_cast<T *>(AccessorType(const_val)(Coordinates(0)));
+
// Create and configure function
FunctionType padding;
- padding.configure(&src, &dst, paddings);
+ padding.configure(&src, &dst, paddings, const_value, mode);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -81,7 +105,7 @@ protected:
ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
- fill(AccessorType(src));
+ fill(AccessorType(src), 0);
// Compute function
padding.run();
@@ -90,15 +114,17 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
- const PaddingList &paddings)
+ const PaddingList &paddings, const PaddingMode mode)
{
// Create reference tensor
SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<T> const_val{ TensorShape(1), data_type };
// Fill reference tensor
- fill(src);
+ fill(src, 0);
+ fill(const_val, 1);
- return reference::pad_layer(src, paddings);
+ return reference::pad_layer(src, paddings, const_val[0], mode);
}
TensorType _target{};
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index f2cf606a00..7c23399bc1 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -1183,6 +1183,46 @@ inline ::std::ostream &operator<<(::std::ostream &os, const Rectangle &rect)
return os;
}
+/** Formatted output of the PaddingMode type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] mode Type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const PaddingMode &mode)
+{
+ switch(mode)
+ {
+ case PaddingMode::CONSTANT:
+ os << "CONSTANT";
+ break;
+ case PaddingMode::REFLECT:
+ os << "REFLECT";
+ break;
+ case PaddingMode::SYMMETRIC:
+ os << "SYMMETRIC";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ return os;
+}
+
+/** Formatted output of the PaddingMode type.
+ *
+ * @param[in] mode Type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const PaddingMode &mode)
+{
+ std::stringstream str;
+ str << mode;
+ return str.str();
+}
+
/** Formatted output of the PadStrideInfo type.
*
* @param[out] os Output stream.