-rw-r--r--  arm_compute/core/Types.h                               | 28
-rw-r--r--  arm_compute/core/Utils.h                               | 20
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp           | 24
-rw-r--r--  src/core/NEON/kernels/NEPoolingLayerKernel.cpp         | 22
-rw-r--r--  src/core/Utils.cpp                                     | 25
-rw-r--r--  src/runtime/CL/functions/CLConvolutionLayer.cpp        |  7
-rw-r--r--  src/runtime/CL/functions/CLLocallyConnectedLayer.cpp   |  4
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp      |  7
-rw-r--r--  src/runtime/NEON/functions/NELocallyConnectedLayer.cpp |  4
-rw-r--r--  tests/model_objects/AlexNet.h                          | 16
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp                   |  5
-rw-r--r--  tests/validation/NEON/ConvolutionLayerDirect.cpp       |  5
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp                 |  5
13 files changed, 88 insertions(+), 84 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 725567b9ae..b7a30a5634 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -563,32 +563,42 @@ private:
class WeightsInfo
{
public:
+ /** Default constructor */
WeightsInfo()
- : _are_reshaped(false), _kernel_size(0)
+ : _are_reshaped(false), _kernel_width(0), _kernel_height(0)
{
}
/** Constructor
*
- * @param[in] are_reshaped True if the weights have been reshaped
- * @param[in] kernel_size The size of the kernel.
+ * @param[in] are_reshaped True if the weights have been reshaped
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
*/
- WeightsInfo(bool are_reshaped, unsigned int kernel_size)
- : _are_reshaped(are_reshaped), _kernel_size(kernel_size)
+ WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height)
+ : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height)
{
}
-
+ /** Flag which specifies if the weights tensor has been reshaped.
+ *
+ * @return True if the weights tensor has been reshaped
+ */
bool are_reshaped() const
{
return _are_reshaped;
};
- unsigned int kernel_size() const
+ /** Return the width and height of the kernel
+ *
+ * @return The width and height of the kernel
+ */
+ std::pair<unsigned int, unsigned int> kernel_size() const
{
- return _kernel_size;
+ return std::make_pair(_kernel_width, _kernel_height);
}
private:
const bool _are_reshaped;
- const unsigned int _kernel_size;
+ const unsigned int _kernel_width;
+ const unsigned int _kernel_height;
};
/** IO formatting information class*/
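A minimal usage sketch of the reworked WeightsInfo API above (illustrative only, not part of the patch; the helper function name is invented):

#include "arm_compute/core/Types.h"

using namespace arm_compute;

void weights_info_sketch() // hypothetical helper, for illustration
{
    // The old single-value constructor could only describe square kernels;
    // width and height are now passed independently, e.g. for a 5x3 kernel.
    const WeightsInfo info(true /* are_reshaped */, 5U /* width */, 3U /* height */);

    // kernel_size() now returns a (width, height) pair instead of one value.
    const std::pair<unsigned int, unsigned int> size = info.kernel_size();
    // size.first == 5, size.second == 3
}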
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 9d3ff0a1bd..c2f0e3982a 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -542,21 +542,17 @@ inline DataType data_type_for_convolution_matrix(const int16_t *conv, size_t siz
/** Returns expected width and height of output scaled tensor depending on dimensions rounding mode.
*
- * @param[in] width Width of input tensor (Number of columns)
- * @param[in] height Height of input tensor (Number of rows)
- * @param[in] kernel_size Kernel size.
- * @param[in] stride_x Stride of the operation in the x dimension.
- * @param[in] stride_y Stride of the operation in the y dimension.
- * @param[in] pad_x Padding size in the x dimension.
- * @param[in] pad_y Padding size in the y dimension.
- * @param[in] round_type Dimensions rounding mode.
+ * @param[in] width Width of input tensor (Number of columns)
+ * @param[in] height Height of input tensor (Number of rows)
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
+ * @param[in] pad_stride_info Pad and stride information.
*
* @return A pair with the new width in the first position and the new height in the second.
*/
-const std::pair<unsigned int, unsigned int> scaled_dimensions(unsigned int width, unsigned int height, unsigned int kernel_size,
- unsigned int stride_x, unsigned int stride_y,
- unsigned int pad_x, unsigned int pad_y,
- DimensionRoundingType round_type);
+const std::pair<unsigned int, unsigned int> scaled_dimensions(unsigned int width, unsigned int height,
+ unsigned int kernel_width, unsigned int kernel_height,
+ const PadStrideInfo &pad_stride_info);
/** Convert a tensor format into a string.
*
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 7648025caa..15f26e3985 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -52,16 +52,15 @@ BorderSize CLPoolingLayerKernel::border_size() const
void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- const PoolingType pool_type = pool_info.pool_type();
- const int pool_size = pool_info.pool_size();
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
- DimensionRoundingType pool_round = pad_stride_info.round();
+ int pool_pad_x = 0;
+ int pool_pad_y = 0;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+ const PoolingType pool_type = pool_info.pool_type();
+ const int pool_size = pool_info.pool_size();
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
@@ -78,9 +77,8 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
input->info()->dimension(1),
pool_size,
- pool_stride_x, pool_stride_y,
- pool_pad_x, pool_pad_y,
- pool_round);
+ pool_size,
+ pool_info.pad_stride_info());
ARM_COMPUTE_UNUSED(pooled_w);
ARM_COMPUTE_UNUSED(pooled_h);
ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pooled_w) || (output->info()->dimension(1) != pooled_h));
diff --git a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
index 8991e9b9ee..578b01419d 100644
--- a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
@@ -81,16 +81,15 @@ BorderSize NEPoolingLayerKernel::border_size() const
void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- PoolingType pool_type = pool_info.pool_type();
- int pool_size = pool_info.pool_size();
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
- DimensionRoundingType pool_round = pad_stride_info.round();
+ int pool_pad_x = 0;
+ int pool_pad_y = 0;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+ PoolingType pool_type = pool_info.pool_type();
+ int pool_size = pool_info.pool_size();
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
@@ -109,8 +108,7 @@ void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, cons
// Check output dimensions
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1),
- pool_size, pool_stride_x, pool_stride_y,
- pool_pad_x, pool_pad_y, pool_round);
+ pool_size, pool_size, pool_info.pad_stride_info());
ARM_COMPUTE_UNUSED(pooled_w);
ARM_COMPUTE_UNUSED(pooled_h);
ARM_COMPUTE_ERROR_ON((output->info()->dimension(0) != pooled_w) || (output->info()->dimension(1) != pooled_h));
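Pooling windows are still square, which is why both kernels above pass pool_size twice. A small sketch of the resulting shape computation, using values that mirror the 3x3, stride-2 pooling in the AlexNet test further down (input size assumed, for illustration):

#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"

using namespace arm_compute;

void pooled_shape_sketch() // hypothetical helper, for illustration
{
    const unsigned int pool_size = 3;                // square 3x3 window
    const PadStrideInfo pad_stride_info(2, 2, 0, 0); // stride 2, no padding

    // 55x55 input: floor((55 + 2*0 - 3) / 2 + 1) = 27 in each dimension.
    const auto pooled = scaled_dimensions(55, 55, pool_size, pool_size, pad_stride_info);
    // pooled.first == 27, pooled.second == 27
}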
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index bf005c12f6..f6230c0199 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -233,22 +233,25 @@ std::string arm_compute::lower_string(const std::string &val)
return res;
}
-const std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(unsigned int width, unsigned int height, unsigned int kernel_size,
- unsigned int stride_x, unsigned int stride_y,
- unsigned int pad_x, unsigned int pad_y,
- DimensionRoundingType round_type)
+const std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(unsigned int width, unsigned int height,
+ unsigned int kernel_width, unsigned int kernel_height,
+ const PadStrideInfo &pad_stride_info)
{
- unsigned int w = 0;
- unsigned int h = 0;
- switch(round_type)
+ const unsigned int pad_x = pad_stride_info.pad().first;
+ const unsigned int pad_y = pad_stride_info.pad().second;
+ const unsigned int stride_x = pad_stride_info.stride().first;
+ const unsigned int stride_y = pad_stride_info.stride().second;
+ unsigned int w = 0;
+ unsigned int h = 0;
+ switch(pad_stride_info.round())
{
case DimensionRoundingType::FLOOR:
- w = static_cast<unsigned int>(std::floor((static_cast<float>(width + 2 * pad_x - kernel_size) / stride_x) + 1));
- h = static_cast<unsigned int>(std::floor((static_cast<float>(height + 2 * pad_y - kernel_size) / stride_y) + 1));
+ w = static_cast<unsigned int>(std::floor((static_cast<float>(width + 2 * pad_x - kernel_width) / stride_x) + 1));
+ h = static_cast<unsigned int>(std::floor((static_cast<float>(height + 2 * pad_y - kernel_height) / stride_y) + 1));
break;
case DimensionRoundingType::CEIL:
- w = static_cast<unsigned int>(std::ceil((static_cast<float>(width + 2 * pad_x - kernel_size) / stride_x) + 1));
- h = static_cast<unsigned int>(std::ceil((static_cast<float>(height + 2 * pad_y - kernel_size) / stride_y) + 1));
+ w = static_cast<unsigned int>(std::ceil((static_cast<float>(width + 2 * pad_x - kernel_width) / stride_x) + 1));
+ h = static_cast<unsigned int>(std::ceil((static_cast<float>(height + 2 * pad_y - kernel_height) / stride_y) + 1));
break;
default:
ARM_COMPUTE_ERROR("Unsupported rounding type");
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index f0bbc3514f..933d92bef7 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -127,9 +127,10 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- const unsigned int kernel_width = _are_weights_reshaped ? weights_info.kernel_size() : weights->info()->dimension(0);
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width,
- stride_x, stride_y, pad_x, pad_y, conv_info.round());
+ const unsigned int kernel_width = _are_weights_reshaped ? weights_info.kernel_size().first : weights->info()->dimension(0);
+ const unsigned int kernel_height = _are_weights_reshaped ? weights_info.kernel_size().second : weights->info()->dimension(1);
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
+ conv_info);
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
// Check if its a "fully connected" convolution
diff --git a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
index 263fb51987..0e6d23e0d8 100644
--- a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
@@ -68,8 +68,8 @@ void CLLocallyConnectedLayer::configure(const ICLTensor *input, const ICLTensor
// Get convolved dimensions
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0),
- stride_x, stride_y, pad_x, pad_y, conv_info.round());
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1),
+ conv_info);
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
ARM_COMPUTE_ERROR_ON_MSG(weights->info()->dimension(4) != (conv_w * conv_h), "Weights shape does not match the expected one");
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index 82c33d54bb..b38d6617d5 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -131,9 +131,10 @@ void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights,
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- const unsigned int kernel_width = (_are_weights_reshaped) ? weights_info.kernel_size() : weights->info()->dimension(0);
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width,
- stride_x, stride_y, pad_x, pad_y, conv_info.round());
+ const unsigned int kernel_width = (_are_weights_reshaped) ? weights_info.kernel_size().first : weights->info()->dimension(0);
+ const unsigned int kernel_height = (_are_weights_reshaped) ? weights_info.kernel_size().second : weights->info()->dimension(1);
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
+ conv_info);
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
// Check if its a "fully connected" convolution
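Note what the extra kernel_height argument fixes in both convolution functions: the old call passed a single kernel width for both dimensions, so a non-square kernel produced a wrong conv_h. A sketch of the discrepancy, with a hypothetical 3x5 kernel at unit stride and no padding on a 10x10 input:

const PadStrideInfo unit_stride(1, 1, 0, 0);
// Width uses 3 as before; height now correctly uses 5 instead of 3.
const auto conv_dims = scaled_dimensions(10, 10, 3, 5, unit_stride);
// conv_dims.first == (10 - 3) + 1 == 8, conv_dims.second == (10 - 5) + 1 == 6
// (the old single-size overload would have reported conv_h == 8 as well)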
diff --git a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
index 85d7ba3650..3b095b4688 100644
--- a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
@@ -68,8 +68,8 @@ void NELocallyConnectedLayer::configure(const ITensor *input, const ITensor *wei
// Get convolved dimensions
unsigned int conv_w = 0;
unsigned int conv_h = 0;
- std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0),
- stride_x, stride_y, pad_x, pad_y, conv_info.round());
+ std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1),
+ conv_info);
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
ARM_COMPUTE_ERROR_ON_MSG(weights->info()->dimension(4) != (conv_w * conv_h), "Weights shape does not match the expected one");
diff --git a/tests/model_objects/AlexNet.h b/tests/model_objects/AlexNet.h
index d49ef0645a..8443e9c97e 100644
--- a/tests/model_objects/AlexNet.h
+++ b/tests/model_objects/AlexNet.h
@@ -250,26 +250,26 @@ public:
// Configure Layers
{
// Layer 1
- conv1->configure(&input, w[0].get(), b[0].get(), &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U));
+ conv1->configure(&input, w[0].get(), b[0].get(), &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U));
act1->configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
norm1->configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
pool1->configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
// Layer 2
- conv21->configure(pool11_out.get(), w21.get(), b21.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U));
- conv22->configure(pool12_out.get(), w22.get(), b22.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U));
+ conv21->configure(pool11_out.get(), w21.get(), b21.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
+ conv22->configure(pool12_out.get(), w22.get(), b22.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
act2->configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
norm2->configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
pool2->configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
// Layer 3
- conv3->configure(&pool2_out, w[2].get(), b[2].get(), &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U));
+ conv3->configure(&pool2_out, w[2].get(), b[2].get(), &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
act3->configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
// Layer 4
- conv41->configure(act31_out.get(), w41.get(), b41.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U));
- conv42->configure(act32_out.get(), w42.get(), b42.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U));
+ conv41->configure(act31_out.get(), w41.get(), b41.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
+ conv42->configure(act32_out.get(), w42.get(), b42.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
act4->configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
// Layer 5
- conv51->configure(act41_out.get(), w51.get(), b51.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U));
- conv52->configure(act42_out.get(), w52.get(), b52.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U));
+ conv51->configure(act41_out.get(), w51.get(), b51.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
+ conv52->configure(act42_out.get(), w52.get(), b52.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
act5->configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
pool5->configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
// Layer 6
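For the conv1 call above, WeightsInfo(_reshaped_weights, 11U, 11U) still describes a square kernel. Assuming the canonical 227x227 AlexNet input (an assumption here; the tensor shapes are set up elsewhere in this file), the expected conv1 output follows directly from the new scaled_dimensions:

// 11x11 kernel, stride 4, no padding on a 227x227 input:
// (227 + 2*0 - 11) / 4 + 1 = 55 exactly, so both rounding modes give 55x55.
const auto conv1_dims = scaled_dimensions(227, 227, 11, 11, PadStrideInfo(4, 4, 0, 0));
// conv1_dims.first == 55, conv1_dims.second == 55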
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 1d0e745088..dabc21eaa8 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -82,9 +82,8 @@ TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_
const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
in_shape.y(),
pool_info.pool_size(),
- pool_info.pad_stride_info().stride().first, pool_info.pad_stride_info().stride().second,
- pool_info.pad_stride_info().pad().first, pool_info.pad_stride_info().pad().second,
- pool_info.pad_stride_info().round());
+ pool_info.pool_size(),
+ pool_info.pad_stride_info());
out_shape.set(0, scaled_dims.first);
out_shape.set(1, scaled_dims.second);
return out_shape;
diff --git a/tests/validation/NEON/ConvolutionLayerDirect.cpp b/tests/validation/NEON/ConvolutionLayerDirect.cpp
index 4e36e331bd..3ddb010e74 100644
--- a/tests/validation/NEON/ConvolutionLayerDirect.cpp
+++ b/tests/validation/NEON/ConvolutionLayerDirect.cpp
@@ -116,9 +116,8 @@ TensorShape get_output_shape(TensorShape in_shape, TensorShape kernel_shape, con
const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
in_shape.y(),
kernel_shape.x(),
- conv_info.stride().first, conv_info.stride().second,
- conv_info.pad().first, conv_info.pad().second,
- conv_info.round());
+ kernel_shape.y(),
+ conv_info);
out_shape.set(0, scaled_dims.first);
out_shape.set(1, scaled_dims.second);
out_shape.set(2, kernel_shape[3]);
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 489c5b668b..b29cd3e2e8 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -99,9 +99,8 @@ TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_
const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
in_shape.y(),
pool_info.pool_size(),
- pool_info.pad_stride_info().stride().first, pool_info.pad_stride_info().stride().second,
- pool_info.pad_stride_info().pad().first, pool_info.pad_stride_info().pad().second,
- pool_info.pad_stride_info().round());
+ pool_info.pool_size(),
+ pool_info.pad_stride_info());
out_shape.set(0, scaled_dims.first);
out_shape.set(1, scaled_dims.second);
return out_shape;