author      Suhail Munshi <MohammedSuhail.Munshi@arm.com>   2021-02-09 16:31:00 +0000
committer   Mohmun02 <MohammedSuhail.Munshi@arm.com>        2021-03-19 16:24:35 +0000
commit      ab8408872f49c9429c84d83de665c55e31a500b2 (patch)
tree        ca67bfa1722091de8d4e93803ad8267e15ef6462 /tests
parent      a50f19346c5b79e2743f882ce0c691c07076f207 (diff)
download    ComputeLibrary-ab8408872f49c9429c84d83de665c55e31a500b2.tar.gz
Added Qasymm8 datatype support to NEROIPoolingLayer with Tests
Tests added to check ROIPooling Layer against reference with both Float32 and Qasymm8 input.

Resolves : COMPMID-2319
Change-Id: I867bc4dde1e3e91f9f42f4a7ce8debfe83b8db50
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/296640
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Comments-Addressed: Pablo Tello <pablo.tello@arm.com>
Signed-off-by: Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5060
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--   tests/validation/NEON/ROIPoolingLayer.cpp            | 142
-rw-r--r--   tests/validation/fixtures/ROIPoolingLayerFixture.h   | 202
-rw-r--r--   tests/validation/reference/ROIPoolingLayer.cpp       | 147
-rw-r--r--   tests/validation/reference/ROIPoolingLayer.h         |  46
4 files changed, 537 insertions, 0 deletions
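
For orientation before the diff itself: below is a minimal sketch of how the new QASYMM8 path can be driven at the function level, mirroring what the fixture's compute_target() does further down. The snippet is not part of this patch; the shapes, quantization parameters, and the allocator()->init() initialization pattern are illustrative assumptions rather than anything prescribed by the change.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void roi_pooling_qasymm8_sketch()
    {
        // Illustrative shapes/quantization only: a 50x40x3 QASYMM8 feature map and 4 ROIs,
        // each ROI stored as 5 uint16_t values {batch_idx, x1, y1, x2, y2}.
        Tensor src, rois, dst;
        src.allocator()->init(TensorInfo(TensorShape(50U, 40U, 3U), 1, DataType::QASYMM8,
                                         QuantizationInfo(1.f / 255.f, 127)));
        rois.allocator()->init(TensorInfo(TensorShape(5U, 4U), 1, DataType::U16));
        dst.allocator()->init(TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::QASYMM8,
                                         QuantizationInfo(2.f / 255.f, 120)));

        // Configure, allocate, fill, run - the same flow the fixture automates.
        NEROIPoolingLayer roi_pool;
        roi_pool.configure(&src, &rois, &dst, ROIPoolingLayerInfo(7U, 7U, 1.f / 8.f));

        src.allocator()->allocate();
        rois.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src with QASYMM8 data and rois with {batch_idx, x1, y1, x2, y2} entries ...
        roi_pool.run();
    }

The fixtures added in this patch automate exactly this flow and compare the result against the new reference implementation.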
diff --git a/tests/validation/NEON/ROIPoolingLayer.cpp b/tests/validation/NEON/ROIPoolingLayer.cpp
new file mode 100644
index 0000000000..8b5147e57f
--- /dev/null
+++ b/tests/validation/NEON/ROIPoolingLayer.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/Globals.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ROIDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ROIPoolingLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+RelativeTolerance<float> relative_tolerance_f32(0.01f);
+AbsoluteTolerance<float> absolute_tolerance_f32(0.001f);
+
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
+} // end namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(RoiPooling)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Successful test
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::QASYMM8), // Successful test (quantized)
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Incorrect rois type
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(250U, 128U, 2U), 1, DataType::F32), // Mismatching depth size input/output
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching number of rois and output batch size
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Invalid number of values per ROIS
+ TensorInfo(TensorShape(250U, 128U, 3U), 1, DataType::F32), // Mismatching height and width input/output
+
+ }),
+ framework::dataset::make("RoisInfo", { TensorInfo(TensorShape(5, 4U), 1, DataType::U16),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::U16),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::F16),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::U16),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::U16),
+ TensorInfo(TensorShape(5, 10U), 1, DataType::U16),
+ TensorInfo(TensorShape(4, 4U), 1, DataType::U16),
+ TensorInfo(TensorShape(5, 4U), 1, DataType::U16),
+ })),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F16),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 5U, 3U, 4U), 1, DataType::F32),
+ })),
+ framework::dataset::make("PoolInfo", { ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ ROIPoolingLayerInfo(7U, 7U, 1./8),
+ })),
+                                      framework::dataset::make("Expected", { true, true, false, false, false, false, false, false })),
+ input_info, rois_info, output_info, pool_info, expected)
+{
+ ARM_COMPUTE_EXPECT(bool(NEROIPoolingLayer::validate(&input_info.clone()->set_is_resizable(true), &rois_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), pool_info)) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+using NEROIPoolingLayerFloatFixture = ROIPoolingLayerFixture<Tensor, Accessor, NEROIPoolingLayer, float>;
+
+TEST_SUITE(Float)
+FIXTURE_DATA_TEST_CASE(SmallROIPoolingLayerFloat, NEROIPoolingLayerFloatFixture, framework::DatasetMode::ALL,
+ framework::dataset::combine(framework::dataset::combine(datasets::SmallROIDataset(),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, relative_tolerance_f32, .02f, absolute_tolerance_f32);
+}
+
+TEST_SUITE_END() // Float
+
+// Begin quantized tests
+TEST_SUITE(Quantized)
+template <typename T>
+using NEROIPoolingLayerQuantizedFixture = ROIPoolingLayerQuantizedFixture<Tensor, Accessor, NEROIPoolingLayer, T>;
+
+TEST_SUITE(QASYMM8)
+
+FIXTURE_DATA_TEST_CASE(Small, NEROIPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(datasets::SmallROIDataset(),
+ framework::dataset::make("DataType", { DataType::QASYMM8 })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 127) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(2.f / 255.f, 120) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE_END() // RoiPooling
+TEST_SUITE_END() // NEON
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
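
The Validate case above exercises NEROIPoolingLayer::validate() through cloned TensorInfo objects. The same static check can be used directly to pre-flight a configuration before anything is allocated; the short sketch below reuses only the types from the test, with shapes mirroring the first (successful) row of the dataset. It is an illustration, not part of the patch.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEROIPoolingLayer.h"

    using namespace arm_compute;

    bool roi_pooling_config_is_valid()
    {
        // Mirrors the first row of the Validate dataset above.
        const TensorInfo input(TensorShape(250U, 128U, 3U), 1, DataType::F32);
        const TensorInfo rois(TensorShape(5U, 4U), 1, DataType::U16); // 4 ROIs, 5 values each
        const TensorInfo output(TensorShape(7U, 7U, 3U, 4U), 1, DataType::F32);

        const auto status = NEROIPoolingLayer::validate(&input, &rois, &output,
                                                        ROIPoolingLayerInfo(7U, 7U, 1.f / 8.f));
        return bool(status); // same bool(...) conversion the test asserts on
    }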
diff --git a/tests/validation/fixtures/ROIPoolingLayerFixture.h b/tests/validation/fixtures/ROIPoolingLayerFixture.h
new file mode 100644
index 0000000000..c32e7af180
--- /dev/null
+++ b/tests/validation/fixtures/ROIPoolingLayerFixture.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE
+#define ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ROIPoolingLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerGenericFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
+ {
+ _target = compute_target(input_shape, data_type, data_layout, pool_info, rois_shape, qinfo, output_qinfo);
+ _reference = compute_reference(input_shape, data_type, pool_info, rois_shape, qinfo, output_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ template <typename U>
+ void generate_rois(U &&rois, const TensorShape &shape, const ROIPoolingLayerInfo &pool_info, TensorShape rois_shape, DataLayout data_layout = DataLayout::NCHW)
+ {
+ const size_t values_per_roi = rois_shape.x();
+ const size_t num_rois = rois_shape.y();
+
+ std::mt19937 gen(library->seed());
+ uint16_t *rois_ptr = static_cast<uint16_t *>(rois.data());
+
+ const float pool_width = pool_info.pooled_width();
+ const float pool_height = pool_info.pooled_height();
+ const float roi_scale = pool_info.spatial_scale();
+
+ // Calculate distribution bounds
+ const auto scaled_width = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)] / roi_scale) / pool_width);
+ const auto scaled_height = static_cast<float>((shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)] / roi_scale) / pool_height);
+ const auto min_width = static_cast<float>(pool_width / roi_scale);
+ const auto min_height = static_cast<float>(pool_height / roi_scale);
+
+ // Create distributions
+ std::uniform_int_distribution<int> dist_batch(0, shape[3] - 1);
+ std::uniform_int_distribution<> dist_x1(0, scaled_width);
+ std::uniform_int_distribution<> dist_y1(0, scaled_height);
+ std::uniform_int_distribution<> dist_w(min_width, std::max(float(min_width), (pool_width - 2) * scaled_width));
+ std::uniform_int_distribution<> dist_h(min_height, std::max(float(min_height), (pool_height - 2) * scaled_height));
+
+ for(unsigned int pw = 0; pw < num_rois; ++pw)
+ {
+ const auto batch_idx = dist_batch(gen);
+ const auto x1 = dist_x1(gen);
+ const auto y1 = dist_y1(gen);
+ const auto x2 = x1 + dist_w(gen);
+ const auto y2 = y1 + dist_h(gen);
+
+ rois_ptr[values_per_roi * pw] = batch_idx;
+ rois_ptr[values_per_roi * pw + 1] = static_cast<uint16_t>(x1);
+ rois_ptr[values_per_roi * pw + 2] = static_cast<uint16_t>(y1);
+ rois_ptr[values_per_roi * pw + 3] = static_cast<uint16_t>(x2);
+ rois_ptr[values_per_roi * pw + 4] = static_cast<uint16_t>(y2);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape,
+ DataType data_type,
+ DataLayout data_layout,
+ const ROIPoolingLayerInfo &pool_info,
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
+ {
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, qinfo, data_layout);
+ TensorType rois_tensor = create_tensor<TensorType>(rois_shape, _rois_data_type, 1, rois_qinfo);
+
+        // Declare output tensor dst with an empty shape; configure() deduces and sets the final shape
+ const TensorShape dst_shape;
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
+
+ // Create and configure function
+ FunctionType roi_pool_layer;
+ roi_pool_layer.configure(&src, &rois_tensor, &dst, pool_info);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ rois_tensor.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!rois_tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+ generate_rois(AccessorType(rois_tensor), input_shape, pool_info, rois_shape, data_layout);
+
+ // Compute function
+ roi_pool_layer.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape,
+ DataType data_type,
+ const ROIPoolingLayerInfo &pool_info,
+ const TensorShape rois_shape,
+ const QuantizationInfo &qinfo,
+ const QuantizationInfo &output_qinfo)
+ {
+ // Create reference tensor
+ SimpleTensor<T> src{ input_shape, data_type, 1, qinfo };
+ const QuantizationInfo rois_qinfo = is_data_type_quantized(data_type) ? QuantizationInfo(0.125f, 0) : QuantizationInfo();
+ SimpleTensor<uint16_t> rois_tensor{ rois_shape, _rois_data_type, 1, rois_qinfo };
+
+ // Fill reference tensor
+ fill(src);
+ generate_rois(rois_tensor, input_shape, pool_info, rois_shape);
+
+ return reference::roi_pool_layer(src, rois_tensor, pool_info, output_qinfo);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ const DataType _rois_data_type{ DataType::U16 };
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerQuantizedFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type,
+ DataLayout data_layout, QuantizationInfo qinfo, QuantizationInfo output_qinfo)
+ {
+ ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape,
+ data_type, data_layout, qinfo, output_qinfo);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ROIPoolingLayerFixture : public ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, const ROIPoolingLayerInfo pool_info, TensorShape rois_shape, DataType data_type, DataLayout data_layout)
+ {
+ ROIPoolingLayerGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_info, rois_shape, data_type, data_layout,
+ QuantizationInfo(), QuantizationInfo());
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_TEST_ROIPOOLINGLAYER_FIXTURE */
\ No newline at end of file
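
To make the ROI buffer produced by generate_rois() above concrete: each ROI occupies rois_shape.x() == 5 consecutive uint16_t values, {batch_index, x1, y1, x2, y2}, in input-image coordinates. The standalone illustration below uses made-up values and is not part of the patch.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main()
    {
        constexpr size_t values_per_roi = 5; // {batch_idx, x1, y1, x2, y2}, as in the fixture
        // Two illustrative ROIs: one on batch 0, one on batch 1.
        const std::vector<uint16_t> rois = {
            0, 8, 4, 40, 28, // ROI 0: batch 0, top-left (8, 4), bottom-right (40, 28)
            1, 0, 0, 24, 16, // ROI 1: batch 1, top-left (0, 0), bottom-right (24, 16)
        };

        const size_t num_rois = rois.size() / values_per_roi;
        for(size_t n = 0; n < num_rois; ++n)
        {
            const uint16_t *roi = rois.data() + n * values_per_roi;
            std::printf("ROI %zu: batch=%u box=(%u,%u)-(%u,%u)\n", n,
                        static_cast<unsigned>(roi[0]), static_cast<unsigned>(roi[1]),
                        static_cast<unsigned>(roi[2]), static_cast<unsigned>(roi[3]),
                        static_cast<unsigned>(roi[4]));
        }
        return 0;
    }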
diff --git a/tests/validation/reference/ROIPoolingLayer.cpp b/tests/validation/reference/ROIPoolingLayer.cpp
new file mode 100644
index 0000000000..8dc3014763
--- /dev/null
+++ b/tests/validation/reference/ROIPoolingLayer.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "ROIPoolingLayer.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/validation/Helpers.h"
+#include <algorithm>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <>
+SimpleTensor<float> roi_pool_layer(const SimpleTensor<float> &src, const SimpleTensor<uint16_t> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo)
+{
+ ARM_COMPUTE_UNUSED(output_qinfo);
+
+ const size_t num_rois = rois.shape()[1];
+ const size_t values_per_roi = rois.shape()[0];
+ DataType output_data_type = src.data_type();
+
+ TensorShape input_shape = src.shape();
+ TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), src.shape()[2], num_rois);
+ SimpleTensor<float> output(output_shape, output_data_type);
+
+ const int pooled_w = pool_info.pooled_width();
+ const int pooled_h = pool_info.pooled_height();
+ const float spatial_scale = pool_info.spatial_scale();
+
+ // get sizes of x and y dimensions in src tensor
+ const int width = src.shape()[0];
+ const int height = src.shape()[1];
+
+ // Move pointer across the fourth dimension
+ const size_t input_stride_w = input_shape[0] * input_shape[1] * input_shape[2];
+ const size_t output_stride_w = output_shape[0] * output_shape[1] * output_shape[2];
+
+ const auto *rois_ptr = reinterpret_cast<const uint16_t *>(rois.data());
+
+    // Iterate over each ROI
+ for(size_t pw = 0; pw < num_rois; ++pw)
+ {
+ const unsigned int roi_batch = rois_ptr[values_per_roi * pw];
+ const auto x1 = rois_ptr[values_per_roi * pw + 1];
+ const auto y1 = rois_ptr[values_per_roi * pw + 2];
+ const auto x2 = rois_ptr[values_per_roi * pw + 3];
+ const auto y2 = rois_ptr[values_per_roi * pw + 4];
+
+        // Iterate over the feature maps (Z axis)
+ for(size_t fm = 0; fm < input_shape[2]; ++fm)
+ {
+            // Iterate over the pooled output height (Y axis)
+ for(size_t py = 0; py < pool_info.pooled_height(); ++py)
+ {
+ // Scale ROI
+ const int roi_anchor_x = support::cpp11::round(x1 * spatial_scale);
+ const int roi_anchor_y = support::cpp11::round(y1 * spatial_scale);
+ const int roi_width = std::max(support::cpp11::round((x2 - x1) * spatial_scale), 1.f);
+ const int roi_height = std::max(support::cpp11::round((y2 - y1) * spatial_scale), 1.f);
+
+                // Iterate over the pooled output width (X axis)
+ for(size_t px = 0; px < pool_info.pooled_width(); ++px)
+ {
+ auto region_start_x = static_cast<int>(std::floor((static_cast<float>(px) / pooled_w) * roi_width));
+ auto region_end_x = static_cast<int>(std::floor((static_cast<float>(px + 1) / pooled_w) * roi_width));
+ auto region_start_y = static_cast<int>(std::floor((static_cast<float>(py) / pooled_h) * roi_height));
+ auto region_end_y = static_cast<int>(std::floor((static_cast<float>(py + 1) / pooled_h) * roi_height));
+
+ region_start_x = std::min(std::max(region_start_x + roi_anchor_x, 0), width);
+ region_end_x = std::min(std::max(region_end_x + roi_anchor_x, 0), width);
+ region_start_y = std::min(std::max(region_start_y + roi_anchor_y, 0), height);
+ region_end_y = std::min(std::max(region_end_y + roi_anchor_y, 0), height);
+
+                    // An empty pooling region produces a zero output element
+ if((region_end_x <= region_start_x) || (region_end_y <= region_start_y))
+ {
+                        /* Assign element in tensor 'output' at coordinates (px, py, fm, pw) to 0 */
+ auto out_ptr = output.data() + px + py * output_shape[0] + fm * output_shape[0] * output_shape[1] + pw * output_stride_w;
+ *out_ptr = 0;
+ }
+ else
+ {
+ float curr_max = -std::numeric_limits<float>::max();
+ for(int j = region_start_y; j < region_end_y; ++j)
+ {
+ for(int i = region_start_x; i < region_end_x; ++i)
+ {
+                                /* Retrieve element from input tensor at coordinates (i, j, fm, roi_batch) */
+ float in_element = *(src.data() + i + j * input_shape[0] + fm * input_shape[0] * input_shape[1] + roi_batch * input_stride_w);
+ curr_max = std::max(in_element, curr_max);
+ }
+ }
+
+                    /* Assign element in tensor 'output' at coordinates (px, py, fm, pw) to curr_max */
+ auto out_ptr = output.data() + px + py * output_shape[0] + fm * output_shape[0] * output_shape[1] + pw * output_stride_w;
+ *out_ptr = curr_max;
+ }
+ }
+ }
+ }
+ }
+
+ return output;
+}
+
+/*
+    Specialization of roi_pool_layer for the quantized QASYMM8 data type: the input is
+    dequantized, pooled with the float reference above, and the result is requantized
+    using output_qinfo.
+*/
+template <>
+SimpleTensor<uint8_t> roi_pool_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint16_t> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo)
+{
+ const SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_tmp = roi_pool_layer<float>(src_tmp, rois, pool_info, output_qinfo);
+ SimpleTensor<uint8_t> dst = convert_to_asymmetric<uint8_t>(dst_tmp, output_qinfo);
+ return dst;
+}
+
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
\ No newline at end of file
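
The uint8_t specialization above relies on the round trip dequantize -> float ROI pooling -> requantize, driven by convert_from_asymmetric() and convert_to_asymmetric() with the output QuantizationInfo. Below is a minimal standalone sketch of that arithmetic, assuming the usual affine mapping real = scale * (q - offset) and round-to-nearest requantization; the helper names are local to the sketch, not the library's.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Assumed affine QASYMM8 mapping: real = scale * (q - offset).
    static float dequantize(uint8_t q, float scale, int offset)
    {
        return scale * (static_cast<int>(q) - offset);
    }

    static uint8_t quantize(float x, float scale, int offset)
    {
        const int q = static_cast<int>(std::lround(x / scale)) + offset;
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
    }

    int main()
    {
        const float in_scale   = 1.f / 255.f; // matches the test's input QuantizationInfo
        const int   in_offset  = 127;
        const float out_scale  = 2.f / 255.f; // matches the test's output QuantizationInfo
        const int   out_offset = 120;

        // Max-pool two dequantized values, then requantize the result.
        const float   a      = dequantize(200, in_scale, in_offset);
        const float   b      = dequantize(150, in_scale, in_offset);
        const uint8_t pooled = quantize(std::max(a, b), out_scale, out_offset);
        std::printf("pooled QASYMM8 value: %u\n", pooled);
        return 0;
    }

The rounding on requantization can move the result by one least-significant bit, which is consistent with the quantized test above validating against tolerance_qasymm8(1).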
diff --git a/tests/validation/reference/ROIPoolingLayer.h b/tests/validation/reference/ROIPoolingLayer.h
new file mode 100644
index 0000000000..ddbaee2d5e
--- /dev/null
+++ b/tests/validation/reference/ROIPoolingLayer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ROIPOOLLAYER_H
+#define ARM_COMPUTE_TEST_ROIPOOLLAYER_H
+
+#include "arm_compute/core/Types.h"
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> roi_pool_layer(const SimpleTensor<T> &src, const SimpleTensor<uint16_t> &rois, const ROIPoolingLayerInfo &pool_info, const QuantizationInfo &output_qinfo);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_TEST_ROIPOOLLAYER_H */
\ No newline at end of file