-rw-r--r--  arm_compute/core/Types.h                                 34
-rw-r--r--  examples/graph_squeezenet.cpp                             2
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp             37
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp   26
-rw-r--r--  src/core/NEON/kernels/NEPoolingLayerKernel.cpp           36
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp                     11
-rw-r--r--  tests/validation/CPP/PoolingLayer.cpp                    13
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h          20
-rw-r--r--  utils/TypePrinter.h                                      10
9 files changed, 124 insertions, 65 deletions
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 6e7eb3c829..b0a284fe69 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -550,20 +550,35 @@ private:
class PoolingLayerInfo
{
public:
+ /** Default Constructor */
+ PoolingLayerInfo()
+ : _pool_type(PoolingType::MAX), _pool_size(0), _pad_stride_info(PadStrideInfo()), _exclude_padding(false), _is_global_pooling(false)
+ {
+ }
/** Default Constructor
*
- * @param[in] pool_type Pooling type @ref PoolingType. Defaults to @ref PoolingType::MAX
- * @param[in] pool_size (Optional) Pooling size, in elements, across x and y. Defaults to 2.
+ * @param[in] pool_type Pooling type @ref PoolingType.
+ * @param[in] pool_size Pooling size, in elements, across x and y.
* @param[in] pad_stride_info (Optional) Padding and stride information @ref PadStrideInfo
* @param[in] exclude_padding (Optional) Strategy when accounting padding in calculations.
* True will exclude padding while false will not (Used in AVG/L2 pooling to determine the pooling area).
* Defaults to false;
*/
- PoolingLayerInfo(PoolingType pool_type = PoolingType::MAX,
- unsigned int pool_size = 2,
- PadStrideInfo pad_stride_info = PadStrideInfo(),
- bool exclude_padding = false)
- : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding)
+ explicit PoolingLayerInfo(PoolingType pool_type,
+ unsigned int pool_size,
+ PadStrideInfo pad_stride_info = PadStrideInfo(),
+ bool exclude_padding = false)
+ : _pool_type(pool_type), _pool_size(pool_size), _pad_stride_info(pad_stride_info), _exclude_padding(exclude_padding), _is_global_pooling(false)
+ {
+ }
+ /** Constructor
+ *
+ * @note This constructor is used for global pooling
+ *
+ * @param[in] pool_type Pooling type @ref PoolingType.
+ */
+ explicit PoolingLayerInfo(PoolingType pool_type)
+ : _pool_type(pool_type), _pool_size(0), _pad_stride_info(PadStrideInfo(1, 1, 0, 0)), _exclude_padding(false), _is_global_pooling(true)
{
}
PoolingType pool_type() const
@@ -582,12 +597,17 @@ public:
{
return _exclude_padding;
}
+ bool is_global_pooling() const
+ {
+ return _is_global_pooling;
+ }
private:
PoolingType _pool_type;
unsigned int _pool_size;
PadStrideInfo _pad_stride_info;
bool _exclude_padding;
+ bool _is_global_pooling;
};
/** ROI Pooling Layer Information class */
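For orientation, a minimal usage sketch of the reworked constructors (illustrative only; the variable names are invented):

    // Fixed-size pooling: the pool size is now a required argument.
    PoolingLayerInfo pool3x3(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0));

    // Global pooling: only the type is supplied; the pool size is deduced
    // from the input width when the kernel is configured.
    PoolingLayerInfo global_avg(PoolingType::AVG);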
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index 195fe0addb..d38cec28c4 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -190,7 +190,7 @@ void main_graph_squeezenet(int argc, const char **argv)
get_weights_accessor(data_path, "/cnn_data/squeezenet_v1.0_model/conv10_b.npy"),
PadStrideInfo(1, 1, 0, 0))
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
- << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 13, PadStrideInfo(1, 1, 0, 0, DimensionRoundingType::CEIL)))
+ << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
<< FlattenLayer()
<< SoftmaxLayer()
<< Tensor(get_output_accessor(label, 5));
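This substitution is behavior-preserving here: the old call pooled with a 13x13 window at stride 1 and no padding, and global pooling resolves to exactly that as long as the incoming feature map is 13x13 (which the explicit size suggests). The CEIL rounding the old call requested no longer matters, since (13 - 13) / 1 + 1 == 1 under either rounding mode.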
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 1317278fb5..e436c46d29 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -53,21 +53,25 @@ BorderSize CLPoolingLayerKernel::border_size() const
void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- const PoolingType pool_type = pool_info.pool_type();
- const int pool_size = pool_info.pool_size();
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
- bool exclude_padding = pool_info.exclude_padding();
+ int pool_pad_x = 0;
+ int pool_pad_y = 0;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+ const PoolingType pool_type = pool_info.pool_type();
+ int pool_size = pool_info.pool_size();
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
+ const bool exclude_padding = pool_info.exclude_padding();
+ const bool is_global_pooling = pool_info.is_global_pooling();
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ // Update pool size in case of global pooling
+ pool_size = is_global_pooling ? input->info()->dimension(0) : pool_size;
+
// Check output dimensions
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
input->info()->dimension(1),
@@ -188,11 +192,12 @@ Error CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo
ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(input->data_type()) && pool_info.pool_type() == PoolingType::L2),
"Unsupported combination of parameters!");
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_size = pool_info.pool_size();
- std::tie(pool_pad_x, pool_pad_y) = pool_info.pad_stride_info().pad();
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(((pool_pad_x >= pool_size) || (pool_pad_y >= pool_size)),
+ const bool is_global_pooling = pool_info.is_global_pooling();
+ const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size();
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_global_pooling && (input->tensor_shape().x() != input->tensor_shape().y()),
+ "Global pooling is supported only with rectangular inputs!");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_global_pooling && ((pool_info.pad_stride_info().pad().first >= pool_size) || (pool_info.pad_stride_info().pad().second >= pool_size)),
"Invalid pool size and pool pad combination!");
// Checks performed when output is configured
@@ -208,7 +213,7 @@ Error CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo
pool_size,
pool_size,
pool_info.pad_stride_info());
- ARM_COMPUTE_RETURN_ERROR_ON_MSG((output->dimension(0) != pooled_w) != (output->dimension(1) != pooled_h),
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((output->dimension(0) != pooled_w) || (output->dimension(1) != pooled_h),
"Invalid output pooling dimensions!");
}
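A quick sanity check on the dimensions validate() now enforces, assuming scaled_dimensions() implements the standard pooling output formula:

    // Global pooling: kernel == W, and the constructor fixes PadStrideInfo(1, 1, 0, 0),
    // so each spatial dimension collapses to
    //   pooled_w = (W + 2*0 - W) / 1 + 1 = 1
    //   pooled_h = (H + 2*0 - H) / 1 + 1 = 1
    // which is why the only output plane that passes validation is 1x1.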
diff --git a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
index c877da3783..073c3961f2 100644
--- a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
@@ -52,22 +52,26 @@ BorderSize GCPoolingLayerKernel::border_size() const
void GCPoolingLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- const PoolingType pool_type = pool_info.pool_type();
- const int pool_size = pool_info.pool_size();
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
+ int pool_pad_x = 0;
+ int pool_pad_y = 0;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+ const PoolingType pool_type = pool_info.pool_type();
+ int pool_size = pool_info.pool_size();
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
+ const bool is_global_pooling = pool_info.is_global_pooling();
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_ERROR_ON(pool_pad_x >= pool_size || pool_pad_y >= pool_size);
- ARM_COMPUTE_ERROR_ON(pool_size > 7 && is_data_type_fixed_point(input->info()->data_type()));
+ ARM_COMPUTE_ERROR_ON(!is_global_pooling && (pool_pad_x >= pool_size || pool_pad_y >= pool_size));
+ ARM_COMPUTE_ERROR_ON(is_global_pooling && (input->info()->tensor_shape().x() != input->info()->tensor_shape().y()));
+
+ // Update pool size in case of global pooling
+ pool_size = is_global_pooling ? input->info()->dimension(0) : pool_size;
// Check output dimensions
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
diff --git a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
index 3ea5bb5870..0e06704666 100644
--- a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
@@ -102,16 +102,17 @@ BorderSize NEPoolingLayerKernel::border_size() const
void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- PoolingType pool_type = pool_info.pool_type();
- int pool_size = pool_info.pool_size();
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
- bool exclude_padding = pool_info.exclude_padding();
+ int pool_pad_x = 0;
+ int pool_pad_y = 0;
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ unsigned int pooled_w = 0;
+ unsigned int pooled_h = 0;
+ PoolingType pool_type = pool_info.pool_type();
+ int pool_size = pool_info.pool_size();
+ const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
+ const bool exclude_padding = pool_info.exclude_padding();
+ const bool is_global_pooling = pool_info.is_global_pooling();
std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
@@ -122,13 +123,20 @@ void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, cons
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON(pool_type == PoolingType::L2 && is_data_type_fixed_point(input->info()->data_type()));
ARM_COMPUTE_ERROR_ON((supported_pool_sizes.find(pool_size) == supported_pool_sizes.end()) && (input->info()->data_type() != DataType::F32));
- ARM_COMPUTE_ERROR_ON(pool_pad_x >= pool_size || pool_pad_y >= pool_size);
+ ARM_COMPUTE_ERROR_ON(!is_global_pooling && (pool_pad_x >= pool_size || pool_pad_y >= pool_size));
+ ARM_COMPUTE_ERROR_ON(is_global_pooling && (input->info()->tensor_shape().x() != input->info()->tensor_shape().y()));
ARM_COMPUTE_ERROR_ON(is_data_type_fixed_point(input->info()->data_type()) && pool_stride_x > 2);
ARM_COMPUTE_ERROR_ON(exclude_padding && is_data_type_fixed_point(input->info()->data_type()));
+ // Update pool size in case of global pooling
+ pool_size = is_global_pooling ? input->info()->dimension(0) : pool_size;
+
// Check output dimensions
- std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1),
- pool_size, pool_size, pool_info.pad_stride_info());
+ std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(0),
+ input->info()->dimension(1),
+ pool_size,
+ pool_size,
+ pool_info.pad_stride_info());
// Output auto initialization if not yet initialized
{
@@ -1031,7 +1039,7 @@ void NEPoolingLayerKernel::poolingN_f32(const Window &window_input, const Window
Iterator input(_input, window_input);
Iterator output(_output, window);
- const int pool_size = _pool_info.pool_size();
+ const int pool_size = _pool_info.is_global_pooling() ? _input->info()->tensor_shape().x() : _pool_info.pool_size();
int pool_pad_x = 0;
int pool_pad_y = 0;
int pool_stride_x = 0;
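One subtlety in the NEON hunk above: the supported_pool_sizes check runs before pool_size is rewritten for global pooling, so a global configuration reaches it with pool_size still 0. Assuming supported_pool_sizes (not shown in this hunk) lists only real fixed sizes, that check effectively limits NEON global pooling to F32 inputs, which are then handled by the generic poolingN_f32 path.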
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index b3d56122db..c7c3b4192e 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -78,6 +78,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0), // Invalid parameters
+ TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-square Global Pooling
+ TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling
+ TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0),
TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32, 0),
@@ -86,6 +89,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8, 0),
+ TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)),
@@ -94,8 +100,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)),
PoolingLayerInfo(PoolingType::L2, 3, PadStrideInfo(1, 1, 0, 0)),
+ PoolingLayerInfo(PoolingType::AVG),
+ PoolingLayerInfo(PoolingType::MAX),
+ PoolingLayerInfo(PoolingType::AVG),
})),
- framework::dataset::make("Expected", { true, false, true, false, true, true, true })),
+ framework::dataset::make("Expected", { true, false, true, false, true, true, true, true, true, false })),
input_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info, &output_info, pool_info)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/CPP/PoolingLayer.cpp b/tests/validation/CPP/PoolingLayer.cpp
index 90a48e0c44..1a7dd4cbb7 100644
--- a/tests/validation/CPP/PoolingLayer.cpp
+++ b/tests/validation/CPP/PoolingLayer.cpp
@@ -40,10 +40,11 @@ namespace
TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
{
TensorShape dst_shape = shape;
+ const int pool_size = info.is_global_pooling() ? shape.x() : info.pool_size();
const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(),
shape.y(),
- info.pool_size(),
- info.pool_size(),
+ pool_size,
+ pool_size,
info.pad_stride_info());
dst_shape.set(0, scaled_dims.first);
dst_shape.set(1, scaled_dims.second);
@@ -55,7 +56,9 @@ TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
- const int pool_size = info.pool_size();
+ ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
+
+ const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size();
PoolingType type = info.pool_type();
int pool_stride_x = info.pad_stride_info().stride().first;
int pool_stride_y = info.pad_stride_info().stride().second;
@@ -164,7 +167,9 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
- const int pool_size = info.pool_size();
+ ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
+
+ const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size();
PoolingType type = info.pool_type();
int pool_stride_x = info.pad_stride_info().stride().first;
int pool_stride_y = info.pad_stride_info().stride().second;
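A worked example of the updated reference path (a sketch reusing the valid case from the CL dataset above):

    // Input 13x13x5 with PoolingLayerInfo(PoolingType::AVG):
    //   pool_size = shape.x() = 13
    //   scaled_dimensions(13, 13, 13, 13, PadStrideInfo(1, 1, 0, 0)) -> {1, 1}
    //   dst_shape = 1x1x5
    TensorShape dst = calculate_output_shape(TensorShape(13U, 13U, 5U), PoolingLayerInfo(PoolingType::AVG));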
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index d6190e2977..14192517fc 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,15 +47,14 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
{
_fractional_bits = fractional_bits;
_quantization_info = quantization_info;
- PoolingLayerInfo info(pool_type, pool_size, pad_stride_info, exclude_padding);
+ _pool_info = pool_info;
- _target = compute_target(shape, info, data_type, fractional_bits, quantization_info);
- _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
+ _target = compute_target(shape, pool_info, data_type, fractional_bits, quantization_info);
+ _reference = compute_reference(shape, pool_info, data_type, fractional_bits, quantization_info);
}
protected:
@@ -125,6 +124,7 @@ protected:
SimpleTensor<T> _reference{};
int _fractional_bits{};
QuantizationInfo _quantization_info{};
+ PoolingLayerInfo _pool_info{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -134,7 +134,7 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
data_type, 0, QuantizationInfo());
}
};
@@ -146,7 +146,7 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
data_type, fractional_bits, QuantizationInfo());
}
};
@@ -158,19 +158,19 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, QuantizationInfo quantization_info)
{
- PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding),
data_type, 0, quantization_info);
}
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationFixture<TensorType, AccessorType, FunctionType, T>
+class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, DataType data_type)
{
- PoolingLayerValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, shape.x(), PadStrideInfo(1, 1, 0, 0), true, data_type);
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, 0, QuantizationInfo());
}
};
} // namespace validation
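With the generic fixture now driven by a ready-made PoolingLayerInfo, adding further variants only requires building the right info object. A hypothetical subclass sketch (the class name is invented for illustration):

    template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
    class MyPoolingValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
    {
    public:
        template <typename...>
        void setup(TensorShape shape, DataType data_type)
        {
            // Any PoolingLayerInfo works here; the generic fixture handles the rest.
            PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
                shape, PoolingLayerInfo(PoolingType::AVG), data_type, 0, QuantizationInfo());
        }
    };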
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 2d5c618bce..863fcaf6d9 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -727,7 +727,15 @@ inline std::string to_string(const PoolingType &type)
inline std::string to_string(const PoolingLayerInfo &info)
{
std::stringstream str;
- str << info.pool_type();
+ str << "{Type=" << info.pool_type() << ","
+ << "IsGlobalPooling=" << info.is_global_pooling();
+ if(!info.is_global_pooling())
+ {
+ str << ","
+ << "PoolSize=" << info.pool_size() << ","
+ << "PadStride=" << info.pad_stride_info();
+ }
+ str << "}";
return str.str();
}
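Assuming the PoolingType and PadStrideInfo printers defined elsewhere in this header, the new output looks roughly like this (the exact PadStride text depends on that printer, and bools print as 1/0 since no boolalpha is set):

    to_string(PoolingLayerInfo(PoolingType::AVG));
    // "{Type=AVG,IsGlobalPooling=1}"
    to_string(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
    // "{Type=MAX,IsGlobalPooling=0,PoolSize=3,PadStride=...}"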