author     Michele Di Giorgio <michele.digiorgio@arm.com>   2018-11-16 10:02:26 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>   2018-11-16 17:47:02 +0000
commit     c8df89f477c3dc63f396ad37bee8ed5d50dee4ac (patch)
tree       4fc4151b382438416b531b3bcb586f88eb32c2d1
parent     a25d16c86f0d870408bc8b941aa755093417b0f0 (diff)
download   ComputeLibrary-c8df89f477c3dc63f396ad37bee8ed5d50dee4ac.tar.gz
COMPMID-1451: (3RDPARTY_UPDATE) Fixes for GenerateProposals graph node and BoxWithNMSLimitKernel
COMPMID-1792: Accuracy issue in CLGenerateProposals

This patch does the following:
- Some fixes for the GenerateProposals function and tests
- Adapt BoxWithNMSLimitKernel to only accept U32 tensors as keeps_size
- Update 3rdparty
- Add a small tolerance for a GenerateProposals test

Change-Id: Ia8ec1cdfe941fe05003645e86deb9ea6a6044d74
m---------   3rdparty                                                              0
-rw-r--r--   src/core/CL/kernels/CLROIAlignLayerKernel.cpp                         2
-rw-r--r--   src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp    5
-rw-r--r--   src/graph/nodes/GenerateProposalsLayerNode.cpp                        3
-rw-r--r--   src/runtime/CL/functions/CLGenerateProposalsLayer.cpp                12
-rw-r--r--   tests/validation/CL/GenerateProposalsLayer.cpp                        26
6 files changed, 32 insertions, 16 deletions
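
The common thread in the hunks below is that keeps_size / num_valid_proposals now carries DataType::U32 instead of the scores' floating-point type, and the host side reads it back as a uint32_t. As a minimal, hypothetical sketch (mirroring the updated test code further down, not part of the patch; the helper name and includes are assumptions), reading the scalar back after this change looks like:

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

// Hypothetical helper: read the single U32 element holding the number of
// valid proposals produced by CLGenerateProposalsLayer. The tensor is a
// 1-element U32 tensor, as enforced by the updated validate() checks.
uint32_t read_num_valid_proposals(CLTensor &num_valid_proposals)
{
    num_valid_proposals.map();   // make the CL buffer visible to the host
    const uint32_t n = *reinterpret_cast<uint32_t *>(
        num_valid_proposals.ptr_to_element(Coordinates(0, 0)));
    num_valid_proposals.unmap(); // release the mapping before running more kernels
    return n;
}
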
diff --git a/3rdparty b/3rdparty
-Subproject d3f7f2a41cd41c8026b483dae9ed64c03b61a82
+Subproject 1d9c5d994bc052e72d3cb0eb8f40f189071ca51
diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
index 2d2ac0717f..325eeb240f 100644
--- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
+++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp
@@ -47,11 +47,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *rois, ITe
ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NCHW);
ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0));
if(output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || (output->dimension(1) != pool_info.pooled_height()));
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2));
ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3));
diff --git a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp
index 2b9934cfa8..06a0551e46 100644
--- a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp
+++ b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp
@@ -328,7 +328,7 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::run_nmslimit()
{
*reinterpret_cast<T *>(_keeps->ptr_to_element(Coordinates(cur_start_idx + cur_out_idx + i))) = static_cast<T>(keeps[j].at(i));
}
- *reinterpret_cast<T *>(_keeps_size->ptr_to_element(Coordinates(j + b * num_classes))) = static_cast<T>(keeps[j].size());
+ *reinterpret_cast<uint32_t *>(_keeps_size->ptr_to_element(Coordinates(j + b * num_classes))) = keeps[j].size();
cur_out_idx += keeps[j].size();
}
}
@@ -356,13 +356,14 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::configure(const ITensor *scores_
ARM_COMPUTE_UNUSED(num_classes);
ARM_COMPUTE_ERROR_ON_MSG((4 * num_classes) != boxes_in->info()->dimension(0), "First dimension of input boxes must be of size 4*num_classes");
ARM_COMPUTE_ERROR_ON_MSG(scores_in->info()->dimension(1) != boxes_in->info()->dimension(1), "Input scores and input boxes must have the same number of rows");
+
ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != boxes_out->info()->dimension(1));
ARM_COMPUTE_ERROR_ON(boxes_out->info()->dimension(0) != 4);
if(keeps != nullptr)
{
ARM_COMPUTE_ERROR_ON_MSG(keeps_size == nullptr, "keeps_size cannot be nullptr if keeps has to be provided as output");
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, keeps);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, keeps_size);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(keeps_size, 1, DataType::U32);
ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != keeps->info()->dimension(0));
ARM_COMPUTE_ERROR_ON(num_classes != keeps_size->info()->dimension(0));
}
diff --git a/src/graph/nodes/GenerateProposalsLayerNode.cpp b/src/graph/nodes/GenerateProposalsLayerNode.cpp
index f5a3c02dd5..7367e80539 100644
--- a/src/graph/nodes/GenerateProposalsLayerNode.cpp
+++ b/src/graph/nodes/GenerateProposalsLayerNode.cpp
@@ -80,7 +80,8 @@ TensorDescriptor GenerateProposalsLayerNode::configure_output(size_t idx) const
break;
case 2:
// Configure num_valid_proposals
- output_desc.shape = TensorShape(1);
+ output_desc.shape = TensorShape(1);
+ output_desc.data_type = DataType::U32;
break;
default:
ARM_COMPUTE_ERROR("Unsupported output index");
diff --git a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
index 80ed0e55a4..5dd120277a 100644
--- a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
+++ b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
@@ -57,6 +57,9 @@ CLGenerateProposalsLayer::CLGenerateProposalsLayer(std::shared_ptr<IMemoryManage
void CLGenerateProposalsLayer::configure(const ICLTensor *scores, const ICLTensor *deltas, const ICLTensor *anchors, ICLTensor *proposals, ICLTensor *scores_out, ICLTensor *num_valid_proposals,
const GenerateProposalsInfo &info)
{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(scores, deltas, anchors, proposals, scores_out, num_valid_proposals);
+ ARM_COMPUTE_ERROR_THROW_ON(CLGenerateProposalsLayer::validate(scores->info(), deltas->info(), anchors->info(), proposals->info(), scores_out->info(), num_valid_proposals->info(), info));
+
const DataType data_type = deltas->info()->data_type();
const int num_anchors = scores->info()->dimension(2);
const int feat_width = scores->info()->dimension(0);
@@ -109,7 +112,7 @@ void CLGenerateProposalsLayer::configure(const ICLTensor *scores, const ICLTenso
// Note that NMS needs outputs preinitialized.
auto_init_if_empty(*scores_out->info(), TensorShape(scores_nms_size), 1, data_type);
auto_init_if_empty(*_proposals_4_roi_values.info(), TensorShape(values_per_roi, scores_nms_size), 1, data_type);
- auto_init_if_empty(*num_valid_proposals->info(), TensorShape(values_per_roi, scores_nms_size), 1, data_type);
+ auto_init_if_empty(*num_valid_proposals->info(), TensorShape(1), 1, DataType::U32);
// Initialize temporaries (unused) outputs
_classes_nms_unused.allocator()->init(TensorInfo(TensorShape(1, 1), 1, data_type));
@@ -137,7 +140,8 @@ void CLGenerateProposalsLayer::configure(const ICLTensor *scores, const ICLTenso
Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITensorInfo *deltas, const ITensorInfo *anchors, const ITensorInfo *proposals, const ITensorInfo *scores_out,
const ITensorInfo *num_valid_proposals, const GenerateProposalsInfo &info)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(proposals, scores_out, num_valid_proposals);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(scores, deltas, anchors, proposals, scores_out, num_valid_proposals);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(scores, DataLayout::NCHW);
const int num_anchors = scores->dimension(2);
const int feat_width = scores->dimension(0);
@@ -161,7 +165,7 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens
ARM_COMPUTE_RETURN_ON_ERROR(CLPermuteKernel::validate(scores, &scores_permuted_info, PermutationVector{ 2, 0, 1 }));
TensorInfo scores_flattened_info(deltas->clone()->set_tensor_shape(TensorShape(1, total_num_anchors)).set_is_resizable(true));
- TensorInfo proposals_4_roi_values(proposals->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true));
+ TensorInfo proposals_4_roi_values(deltas->clone()->set_tensor_shape(TensorShape(values_per_roi, total_num_anchors)).set_is_resizable(true));
ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(&scores_permuted_info, &scores_flattened_info));
ARM_COMPUTE_RETURN_ON_ERROR(CLBoundingBoxTransformKernel::validate(&all_anchors_info, &proposals_4_roi_values, &deltas_flattened_info, BoundingBoxTransformInfo(info.im_width(), info.im_height(),
@@ -174,7 +178,7 @@ Status CLGenerateProposalsLayer::validate(const ITensorInfo *scores, const ITens
{
ARM_COMPUTE_RETURN_ERROR_ON(num_valid_proposals->num_dimensions() > 1);
ARM_COMPUTE_RETURN_ERROR_ON(num_valid_proposals->dimension(0) > 1);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(num_valid_proposals, DataType::U32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(num_valid_proposals, 1, DataType::U32);
}
if(proposals->total_size() > 0)
diff --git a/tests/validation/CL/GenerateProposalsLayer.cpp b/tests/validation/CL/GenerateProposalsLayer.cpp
index 28cdc71ae6..b4772fcf79 100644
--- a/tests/validation/CL/GenerateProposalsLayer.cpp
+++ b/tests/validation/CL/GenerateProposalsLayer.cpp
@@ -68,38 +68,45 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16), // Mismatching types
TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16), // Wrong deltas (number of transformation non multiple of 4)
TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16), // Wrong anchors (number of values per roi != 5)
- TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16)}), // Output tensor num_valid_proposals not scalar
+ TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16), // Output tensor num_valid_proposals not scalar
+ TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16)}), // num_valid_proposals not U32
framework::dataset::make("deltas",{ TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
+ TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32)})),
framework::dataset::make("anchors", { TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
TensorInfo(TensorShape(4U, 9U), 1, DataType::F32)})),
framework::dataset::make("proposals", { TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
+ TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32)})),
framework::dataset::make("scores_out", { TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
+ TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32)})),
framework::dataset::make("num_valid_proposals", { TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
- TensorInfo(TensorShape(1U, 10U), 1, DataType::U32)})),
+ TensorInfo(TensorShape(1U, 10U), 1, DataType::U32),
+ TensorInfo(TensorShape(1U, 1U), 1, DataType::F16)})),
framework::dataset::make("generate_proposals_info", { GenerateProposalsInfo(10.f, 10.f, 1.f),
GenerateProposalsInfo(10.f, 10.f, 1.f),
GenerateProposalsInfo(10.f, 10.f, 1.f),
GenerateProposalsInfo(10.f, 10.f, 1.f),
+ GenerateProposalsInfo(10.f, 10.f, 1.f),
GenerateProposalsInfo(10.f, 10.f, 1.f)})),
- framework::dataset::make("Expected", { true, false, false, false, false })),
+ framework::dataset::make("Expected", { true, false, false, false, false, false })),
scores, deltas, anchors, proposals, scores_out, num_valid_proposals, generate_proposals_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLGenerateProposalsLayer::validate(&scores.clone()->set_is_resizable(true),
@@ -262,7 +269,7 @@ DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL
CLTensor proposals;
CLTensor num_valid_proposals;
CLTensor scores_out;
- num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::F32));
+ num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::U32));
CLGenerateProposalsLayer generate_proposals;
generate_proposals.configure(&scores, &bbox_deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
@@ -286,26 +293,27 @@ DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL
// Gather num_valid_proposals
num_valid_proposals.map();
- const float N = *reinterpret_cast<float *>(num_valid_proposals.ptr_to_element(Coordinates(0, 0)));
+ const uint32_t N = *reinterpret_cast<uint32_t *>(num_valid_proposals.ptr_to_element(Coordinates(0, 0)));
num_valid_proposals.unmap();
// Select the first N entries of the proposals
CLTensor proposals_final;
CLSlice select_proposals;
- select_proposals.configure(&proposals, &proposals_final, Coordinates(0, 0), Coordinates(values_per_roi + 1, size_t(N)));
+ select_proposals.configure(&proposals, &proposals_final, Coordinates(0, 0), Coordinates(values_per_roi + 1, N));
proposals_final.allocator()->allocate();
select_proposals.run();
// Select the first N entries of the proposals
CLTensor scores_final;
CLSlice select_scores;
- select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(size_t(N)));
+ select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(N));
scores_final.allocator()->allocate();
select_scores.run();
+ const RelativeTolerance<float> tolerance_f32(1e-6f);
// Validate the output
- validate(CLAccessor(proposals_final), proposals_expected);
- validate(CLAccessor(scores_final), scores_expected);
+ validate(CLAccessor(proposals_final), proposals_expected, tolerance_f32);
+ validate(CLAccessor(scores_final), scores_expected, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, CLComputeAllAnchorsFixture<float>, framework::DatasetMode::ALL,