ArmNN
 21.05
DetectionPostProcessTestImpl.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <ResolveType.hpp>
8 
9 #include <armnn/Types.hpp>
10 
14 
18 
19 #include <test/TensorHelpers.hpp>
20 
21 namespace
22 {
23 
24 using FloatData = std::vector<float>;
25 using QuantData = std::pair<float, int32_t>;
26 
// Shared input fixture for the DetectionPostProcess tests below: tensor
// shapes, quantization parameters and float reference data for the three
// inputs (box encodings, class scores, anchors). Definitions follow after
// the expected-results structs.
struct TestData
{
 static const armnn::TensorShape s_BoxEncodingsShape;
 static const armnn::TensorShape s_ScoresShape;
 static const armnn::TensorShape s_AnchorsShape;

 // Each QuantData pair is { quantization scale, zero-point offset }.
 static const QuantData s_BoxEncodingsQuantData;
 static const QuantData s_ScoresQuantData;
 static const QuantData s_AnchorsQuantData;

 static const FloatData s_BoxEncodings;
 static const FloatData s_Scores;
 static const FloatData s_Anchors;
};
41 
// Reference outputs expected when the workload runs with Regular NMS
// (m_UseRegularNms == true). All four outputs are Float32.
struct RegularNmsExpectedResults
{
 static const FloatData s_DetectionBoxes;
 static const FloatData s_DetectionScores;
 static const FloatData s_DetectionClasses;
 static const FloatData s_NumDetections;
};
49 
// Reference outputs expected when the workload runs with Fast NMS
// (m_UseRegularNms == false). All four outputs are Float32.
struct FastNmsExpectedResults
{
 static const FloatData s_DetectionBoxes;
 static const FloatData s_DetectionScores;
 static const FloatData s_DetectionClasses;
 static const FloatData s_NumDetections;
};
57 
// Input shapes: { batch, numBoxes, 4 box coords }, { batch, numBoxes,
// numClasses+background }, and { numBoxes, 4 } for the anchors.
const armnn::TensorShape TestData::s_BoxEncodingsShape = { 1, 6, 4 };
const armnn::TensorShape TestData::s_ScoresShape = { 1, 6, 3 };
const armnn::TensorShape TestData::s_AnchorsShape = { 6, 4 };

// { scale, offset } pairs used to quantize the float data in the
// *QuantizedTest variants below.
const QuantData TestData::s_BoxEncodingsQuantData = { 1.00f, 1 };
const QuantData TestData::s_ScoresQuantData = { 0.01f, 0 };
const QuantData TestData::s_AnchorsQuantData = { 0.50f, 0 };

// Six box encodings, one row of { dy, dx, dh, dw } per box.
const FloatData TestData::s_BoxEncodings =
{
 0.0f, 0.0f, 0.0f, 0.0f,
 0.0f, 1.0f, 0.0f, 0.0f,
 0.0f, -1.0f, 0.0f, 0.0f,
 0.0f, 0.0f, 0.0f, 0.0f,
 0.0f, 1.0f, 0.0f, 0.0f,
 0.0f, 0.0f, 0.0f, 0.0f
};

// Per-box scores: column 0 is the background class, columns 1-2 are the
// two real classes.
const FloatData TestData::s_Scores =
{
 0.0f, 0.90f, 0.80f,
 0.0f, 0.75f, 0.72f,
 0.0f, 0.60f, 0.50f,
 0.0f, 0.93f, 0.95f,
 0.0f, 0.50f, 0.40f,
 0.0f, 0.30f, 0.20f
};

// Six anchors, one row of { y-centre, x-centre, height, width } per box.
const FloatData TestData::s_Anchors =
{
 0.5f, 0.5f, 1.0f, 1.0f,
 0.5f, 0.5f, 1.0f, 1.0f,
 0.5f, 0.5f, 1.0f, 1.0f,
 0.5f, 10.5f, 1.0f, 1.0f,
 0.5f, 10.5f, 1.0f, 1.0f,
 0.5f, 100.5f, 1.0f, 1.0f
};

// Regular NMS keeps two detections; the third row/entry is zero padding
// up to m_MaxDetections.
const FloatData RegularNmsExpectedResults::s_DetectionBoxes =
{
 0.0f, 10.0f, 1.0f, 11.0f,
 0.0f, 10.0f, 1.0f, 11.0f,
 0.0f, 0.0f, 0.0f, 0.0f
};

const FloatData RegularNmsExpectedResults::s_DetectionScores =
{
 0.95f, 0.93f, 0.0f
};

const FloatData RegularNmsExpectedResults::s_DetectionClasses =
{
 1.0f, 0.0f, 0.0f
};

const FloatData RegularNmsExpectedResults::s_NumDetections = { 2.0f };

// Fast NMS keeps three detections (no padding rows).
const FloatData FastNmsExpectedResults::s_DetectionBoxes =
{
 0.0f, 10.0f, 1.0f, 11.0f,
 0.0f, 0.0f, 1.0f, 1.0f,
 0.0f, 100.0f, 1.0f, 101.0f
};

const FloatData FastNmsExpectedResults::s_DetectionScores =
{
 0.95f, 0.9f, 0.3f
};

const FloatData FastNmsExpectedResults::s_DetectionClasses =
{
 1.0f, 0.0f, 0.0f
};

const FloatData FastNmsExpectedResults::s_NumDetections = { 3.0f };
133 
134 } // anonymous namespace
135 
136 template<typename FactoryType,
137  armnn::DataType ArmnnType,
138  typename T = armnn::ResolveType<ArmnnType>>
139 void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
142  const std::vector<T>& boxEncodingsData,
143  const std::vector<T>& scoresData,
144  const std::vector<T>& anchorsData,
145  const std::vector<float>& expectedDetectionBoxes,
146  const std::vector<float>& expectedDetectionClasses,
147  const std::vector<float>& expectedDetectionScores,
148  const std::vector<float>& expectedNumDetections,
149  bool useRegularNms)
150 {
151  std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
153 
154  auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
155  FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
156  auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
157 
158  auto boxEncodings = MakeTensor<T, 3>(boxEncodingsInfo, boxEncodingsData);
159  auto scores = MakeTensor<T, 3>(scoresInfo, scoresData);
160  auto anchors = MakeTensor<T, 2>(anchorsInfo, anchorsData);
161 
162  armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
163  armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
164  armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32);
165  armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32);
166 
167  LayerTestResult<float, 3> detectionBoxesResult(detectionBoxesInfo);
168  detectionBoxesResult.outputExpected = MakeTensor<float, 3>(detectionBoxesInfo, expectedDetectionBoxes);
169  LayerTestResult<float, 2> detectionClassesResult(detectionClassesInfo);
170  detectionClassesResult.outputExpected = MakeTensor<float, 2>(detectionClassesInfo, expectedDetectionClasses);
171  LayerTestResult<float, 2> detectionScoresResult(detectionScoresInfo);
172  detectionScoresResult.outputExpected = MakeTensor<float, 2>(detectionScoresInfo, expectedDetectionScores);
173  LayerTestResult<float, 1> numDetectionsResult(numDetectionInfo);
174  numDetectionsResult.outputExpected = MakeTensor<float, 1>(numDetectionInfo, expectedNumDetections);
175 
176  auto boxedHandle = tensorHandleFactory.CreateTensorHandle(boxEncodingsInfo);
177  auto scoreshandle = tensorHandleFactory.CreateTensorHandle(scoresInfo);
178  auto anchorsHandle = tensorHandleFactory.CreateTensorHandle(anchorsInfo);
179  auto outputBoxesHandle = tensorHandleFactory.CreateTensorHandle(detectionBoxesInfo);
180  auto classesHandle = tensorHandleFactory.CreateTensorHandle(detectionClassesInfo);
181  auto outputScoresHandle = tensorHandleFactory.CreateTensorHandle(detectionScoresInfo);
182  auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);
183 
184  armnn::ScopedTensorHandle anchorsTensor(anchorsInfo);
185  AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);
186 
188  data.m_Parameters.m_UseRegularNms = useRegularNms;
189  data.m_Parameters.m_MaxDetections = 3;
193  data.m_Parameters.m_NmsIouThreshold = 0.5;
194  data.m_Parameters.m_NumClasses = 2;
195  data.m_Parameters.m_ScaleY = 10.0;
196  data.m_Parameters.m_ScaleX = 10.0;
197  data.m_Parameters.m_ScaleH = 5.0;
198  data.m_Parameters.m_ScaleW = 5.0;
199  data.m_Anchors = &anchorsTensor;
200 
201  armnn::WorkloadInfo info;
202  AddInputToWorkload(data, info, boxEncodingsInfo, boxedHandle.get());
203  AddInputToWorkload(data, info, scoresInfo, scoreshandle.get());
204  AddOutputToWorkload(data, info, detectionBoxesInfo, outputBoxesHandle.get());
205  AddOutputToWorkload(data, info, detectionClassesInfo, classesHandle.get());
206  AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
207  AddOutputToWorkload(data, info, numDetectionInfo, numDetectionHandle.get());
208 
209  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDetectionPostProcess(data, info);
210 
211  boxedHandle->Allocate();
212  scoreshandle->Allocate();
213  outputBoxesHandle->Allocate();
214  classesHandle->Allocate();
215  outputScoresHandle->Allocate();
216  numDetectionHandle->Allocate();
217 
218  CopyDataToITensorHandle(boxedHandle.get(), boxEncodings.origin());
219  CopyDataToITensorHandle(scoreshandle.get(), scores.origin());
220 
221  workload->Execute();
222 
223  CopyDataFromITensorHandle(detectionBoxesResult.output.origin(), outputBoxesHandle.get());
224  CopyDataFromITensorHandle(detectionClassesResult.output.origin(), classesHandle.get());
225  CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
226  CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());
227 
228  BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
229  BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
230  BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
231  BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
232 }
233 
234 template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
235 void QuantizeData(RawType* quant, const float* dequant, const armnn::TensorInfo& info)
236 {
237  for (size_t i = 0; i < info.GetNumElements(); i++)
238  {
239  quant[i] = armnn::Quantize<RawType>(
240  dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
241  }
242 }
243 
244 template<typename FactoryType>
246 {
247  return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
248  armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
249  armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
250  armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
251  TestData::s_BoxEncodings,
252  TestData::s_Scores,
253  TestData::s_Anchors,
254  RegularNmsExpectedResults::s_DetectionBoxes,
255  RegularNmsExpectedResults::s_DetectionClasses,
256  RegularNmsExpectedResults::s_DetectionScores,
257  RegularNmsExpectedResults::s_NumDetections,
258  true);
259 }
260 
261 template<typename FactoryType,
262  armnn::DataType QuantizedType,
263  typename RawType = armnn::ResolveType<QuantizedType>>
265 {
266  armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
267  armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
268  armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);
269 
270  boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
271  boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);
272 
273  scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
274  scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);
275 
276  anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
277  anchorsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);
278 
279  std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
280  QuantizeData<QuantizedType>(boxEncodingsData.data(),
281  TestData::s_BoxEncodings.data(),
282  boxEncodingsInfo);
283 
284  std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
285  QuantizeData<QuantizedType>(scoresData.data(),
286  TestData::s_Scores.data(),
287  scoresInfo);
288 
289  std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
290  QuantizeData<QuantizedType>(anchorsData.data(),
291  TestData::s_Anchors.data(),
292  anchorsInfo);
293 
294  return DetectionPostProcessImpl<FactoryType, QuantizedType>(
295  boxEncodingsInfo,
296  scoresInfo,
297  anchorsInfo,
298  boxEncodingsData,
299  scoresData,
300  anchorsData,
301  RegularNmsExpectedResults::s_DetectionBoxes,
302  RegularNmsExpectedResults::s_DetectionClasses,
303  RegularNmsExpectedResults::s_DetectionScores,
304  RegularNmsExpectedResults::s_NumDetections,
305  true);
306 }
307 
308 template<typename FactoryType>
310 {
311  return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
312  armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
313  armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
314  armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
315  TestData::s_BoxEncodings,
316  TestData::s_Scores,
317  TestData::s_Anchors,
318  FastNmsExpectedResults::s_DetectionBoxes,
319  FastNmsExpectedResults::s_DetectionClasses,
320  FastNmsExpectedResults::s_DetectionScores,
321  FastNmsExpectedResults::s_NumDetections,
322  false);
323 }
324 
325 template<typename FactoryType,
326  armnn::DataType QuantizedType,
327  typename RawType = armnn::ResolveType<QuantizedType>>
329 {
330  armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
331  armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
332  armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);
333 
334  boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
335  boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);
336 
337  scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
338  scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);
339 
340  anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
341  anchorsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);
342 
343  std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
344  QuantizeData<QuantizedType>(boxEncodingsData.data(),
345  TestData::s_BoxEncodings.data(),
346  boxEncodingsInfo);
347 
348  std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
349  QuantizeData<QuantizedType>(scoresData.data(),
350  TestData::s_Scores.data(),
351  scoresInfo);
352 
353  std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
354  QuantizeData<QuantizedType>(anchorsData.data(),
355  TestData::s_Anchors.data(),
356  anchorsInfo);
357 
358  return DetectionPostProcessImpl<FactoryType, QuantizedType>(
359  boxEncodingsInfo,
360  scoresInfo,
361  anchorsInfo,
362  boxEncodingsData,
363  scoresData,
364  anchorsData,
365  FastNmsExpectedResults::s_DetectionBoxes,
366  FastNmsExpectedResults::s_DetectionClasses,
367  FastNmsExpectedResults::s_DetectionScores,
368  FastNmsExpectedResults::s_NumDetections,
369  false);
370 }
float m_ScaleW
Center size encoding scale weight.
static ProfilerManager & GetInstance()
Definition: Profiling.cpp:489
void QuantizeData(RawType *quant, const float *dequant, const armnn::TensorInfo &info)
float m_ScaleX
Center size encoding scale x.
boost::test_tools::predicate_result CompareTensors(const boost::multi_array< T, n > &a, const boost::multi_array< T, n > &b, bool compareBoolean=false, bool isDynamic=false)
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
void DetectionPostProcessImpl(const armnn::TensorInfo &boxEncodingsInfo, const armnn::TensorInfo &scoresInfo, const armnn::TensorInfo &anchorsInfo, const std::vector< T > &boxEncodingsData, const std::vector< T > &scoresData, const std::vector< T > &anchorsData, const std::vector< float > &expectedDetectionBoxes, const std::vector< float > &expectedDetectionClasses, const std::vector< float > &expectedDetectionScores, const std::vector< float > &expectedNumDetections, bool useRegularNms)
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
void DetectionPostProcessRegularNmsFloatTest()
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
uint32_t m_MaxDetections
Maximum numbers of detections.
DataType
Definition: Types.hpp:36
void DetectionPostProcessRegularNmsQuantizedTest()
float m_NmsIouThreshold
Intersection over union threshold.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
float GetQuantizationScale() const
Definition: Tensor.cpp:452
uint32_t m_NumClasses
Number of classes.
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
bool m_UseRegularNms
Use Regular NMS.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
float m_ScaleH
Center size encoding scale height.
void DetectionPostProcessFastNmsQuantizedTest()
void RegisterProfiler(IProfiler *profiler)
Definition: Profiling.cpp:496
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
ClWorkloadFactory FactoryType
void DetectionPostProcessFastNmsFloatTest()
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
float m_ScaleY
Center size encoding scale y.
const ConstTensorHandle * m_Anchors
float m_NmsScoreThreshold
NMS score threshold.
unsigned int GetNumElements() const
Definition: Tensor.hpp:192
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })