ArmNN 21.02
DetectionPostProcessTestImpl.hpp File Reference


Functions

template<typename FactoryType , armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void DetectionPostProcessImpl (const armnn::TensorInfo &boxEncodingsInfo, const armnn::TensorInfo &scoresInfo, const armnn::TensorInfo &anchorsInfo, const std::vector< T > &boxEncodingsData, const std::vector< T > &scoresData, const std::vector< T > &anchorsData, const std::vector< float > &expectedDetectionBoxes, const std::vector< float > &expectedDetectionClasses, const std::vector< float > &expectedDetectionScores, const std::vector< float > &expectedNumDetections, bool useRegularNms)
 
template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void QuantizeData (RawType *quant, const float *dequant, const armnn::TensorInfo &info)
 
template<typename FactoryType >
void DetectionPostProcessRegularNmsFloatTest ()
 
template<typename FactoryType , armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessRegularNmsQuantizedTest ()
 
template<typename FactoryType >
void DetectionPostProcessFastNmsFloatTest ()
 
template<typename FactoryType , armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessFastNmsQuantizedTest ()
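These helpers are intended to be instantiated from backend unit tests. The following is a minimal sketch of how a Boost.Test case might drive the float variants against the reference backend; armnn::RefWorkloadFactory, the include paths and the test-case names are illustrative assumptions, not the exact wiring used by the ArmNN test suites.

// Minimal sketch only: RefWorkloadFactory, the include paths and the test-case
// names are assumptions for illustration.
#include <boost/test/unit_test.hpp>

#include <reference/RefWorkloadFactory.hpp>   // assumed include path for the reference backend
#include "DetectionPostProcessTestImpl.hpp"

BOOST_AUTO_TEST_SUITE(RefDetectionPostProcess)

BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
{
    DetectionPostProcessRegularNmsFloatTest<armnn::RefWorkloadFactory>();
}

BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
{
    DetectionPostProcessFastNmsFloatTest<armnn::RefWorkloadFactory>();
}

BOOST_AUTO_TEST_SUITE_END()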
 

Function Documentation

◆ DetectionPostProcessFastNmsFloatTest()

template<typename FactoryType >
void DetectionPostProcessFastNmsFloatTest ( )

Definition at line 309 of file DetectionPostProcessTestImpl.hpp.

References armnn::Float32.

{
    return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
        armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
        TestData::s_BoxEncodings,
        TestData::s_Scores,
        TestData::s_Anchors,
        FastNmsExpectedResults::s_DetectionBoxes,
        FastNmsExpectedResults::s_DetectionClasses,
        FastNmsExpectedResults::s_DetectionScores,
        FastNmsExpectedResults::s_NumDetections,
        false);
}

◆ DetectionPostProcessFastNmsQuantizedTest()

template<typename FactoryType , armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessFastNmsQuantizedTest ( )

Definition at line 328 of file DetectionPostProcessTestImpl.hpp.

References anchorsInfo, scoresInfo, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

Referenced by BOOST_AUTO_TEST_CASE().

{
    armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
    armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
    armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);

    boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
    boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
    scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);

    anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
    anchorsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
    QuantizeData<QuantizedType>(boxEncodingsData.data(),
                                TestData::s_BoxEncodings.data(),
                                boxEncodingsInfo);

    std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
    QuantizeData<QuantizedType>(scoresData.data(),
                                TestData::s_Scores.data(),
                                scoresInfo);

    std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
    QuantizeData<QuantizedType>(anchorsData.data(),
                                TestData::s_Anchors.data(),
                                anchorsInfo);

    return DetectionPostProcessImpl<FactoryType, QuantizedType>(
        boxEncodingsInfo,
        scoresInfo,
        anchorsInfo,
        boxEncodingsData,
        scoresData,
        anchorsData,
        FastNmsExpectedResults::s_DetectionBoxes,
        FastNmsExpectedResults::s_DetectionClasses,
        FastNmsExpectedResults::s_DetectionScores,
        FastNmsExpectedResults::s_NumDetections,
        false);
}
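The quantized variant additionally takes the armnn::DataType to run against. A hedged usage sketch, assuming the reference backend's RefWorkloadFactory and 8-bit asymmetric quantization:

// Illustrative instantiation; RefWorkloadFactory is an assumption. With
// armnn::DataType::QAsymmU8 the RawType template argument resolves to uint8_t.
DetectionPostProcessFastNmsQuantizedTest<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>();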

◆ DetectionPostProcessImpl()

template<typename FactoryType , armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void DetectionPostProcessImpl ( const armnn::TensorInfo &  boxEncodingsInfo,
const armnn::TensorInfo &  scoresInfo,
const armnn::TensorInfo &  anchorsInfo,
const std::vector< T > &  boxEncodingsData,
const std::vector< T > &  scoresData,
const std::vector< T > &  anchorsData,
const std::vector< float > &  expectedDetectionBoxes,
const std::vector< float > &  expectedDetectionClasses,
const std::vector< float > &  expectedDetectionScores,
const std::vector< float > &  expectedNumDetections,
bool  useRegularNms 
)

Definition at line 139 of file DetectionPostProcessTestImpl.hpp.

References AllocateAndCopyDataToITensorHandle(), anchors(), anchorsInfo, boxEncodings(), CompareTensors(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), armnn::Float32, ProfilerManager::GetInstance(), DetectionPostProcessQueueDescriptor::m_Anchors, DetectionPostProcessDescriptor::m_DetectionsPerClass, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, DetectionPostProcessDescriptor::m_UseRegularNms, ProfilerManager::RegisterProfiler(), scores(), and scoresInfo.

{
    std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
    armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
    FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
    auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);

    auto boxEncodings = MakeTensor<T, 3>(boxEncodingsInfo, boxEncodingsData);
    auto scores = MakeTensor<T, 3>(scoresInfo, scoresData);
    auto anchors = MakeTensor<T, 2>(anchorsInfo, anchorsData);

    armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> detectionBoxesResult(detectionBoxesInfo);
    detectionBoxesResult.outputExpected = MakeTensor<float, 3>(detectionBoxesInfo, expectedDetectionBoxes);
    LayerTestResult<float, 2> detectionClassesResult(detectionClassesInfo);
    detectionClassesResult.outputExpected = MakeTensor<float, 2>(detectionClassesInfo, expectedDetectionClasses);
    LayerTestResult<float, 2> detectionScoresResult(detectionScoresInfo);
    detectionScoresResult.outputExpected = MakeTensor<float, 2>(detectionScoresInfo, expectedDetectionScores);
    LayerTestResult<float, 1> numDetectionsResult(numDetectionInfo);
    numDetectionsResult.outputExpected = MakeTensor<float, 1>(numDetectionInfo, expectedNumDetections);

    auto boxedHandle = tensorHandleFactory.CreateTensorHandle(boxEncodingsInfo);
    auto scoreshandle = tensorHandleFactory.CreateTensorHandle(scoresInfo);
    auto anchorsHandle = tensorHandleFactory.CreateTensorHandle(anchorsInfo);
    auto outputBoxesHandle = tensorHandleFactory.CreateTensorHandle(detectionBoxesInfo);
    auto classesHandle = tensorHandleFactory.CreateTensorHandle(detectionClassesInfo);
    auto outputScoresHandle = tensorHandleFactory.CreateTensorHandle(detectionScoresInfo);
    auto numDetectionHandle = tensorHandleFactory.CreateTensorHandle(numDetectionInfo);

    armnn::ScopedCpuTensorHandle anchorsTensor(anchorsInfo);
    AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);

    armnn::DetectionPostProcessQueueDescriptor data;
    data.m_Parameters.m_UseRegularNms = useRegularNms;
    data.m_Parameters.m_MaxDetections = 3;
    // (m_MaxClassesPerDetection, m_DetectionsPerClass and m_NmsScoreThreshold are also
    //  configured at this point; see the References list above.)
    data.m_Parameters.m_NmsIouThreshold = 0.5;
    data.m_Parameters.m_NumClasses = 2;
    data.m_Parameters.m_ScaleY = 10.0;
    data.m_Parameters.m_ScaleX = 10.0;
    data.m_Parameters.m_ScaleH = 5.0;
    data.m_Parameters.m_ScaleW = 5.0;
    data.m_Anchors = &anchorsTensor;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, boxEncodingsInfo, boxedHandle.get());
    AddInputToWorkload(data, info, scoresInfo, scoreshandle.get());
    AddOutputToWorkload(data, info, detectionBoxesInfo, outputBoxesHandle.get());
    AddOutputToWorkload(data, info, detectionClassesInfo, classesHandle.get());
    AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
    AddOutputToWorkload(data, info, numDetectionInfo, numDetectionHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDetectionPostProcess(data, info);

    boxedHandle->Allocate();
    scoreshandle->Allocate();
    outputBoxesHandle->Allocate();
    classesHandle->Allocate();
    outputScoresHandle->Allocate();
    numDetectionHandle->Allocate();

    CopyDataToITensorHandle(boxedHandle.get(), boxEncodings.origin());
    CopyDataToITensorHandle(scoreshandle.get(), scores.origin());

    workload->Execute();

    CopyDataFromITensorHandle(detectionBoxesResult.output.origin(), outputBoxesHandle.get());
    CopyDataFromITensorHandle(detectionClassesResult.output.origin(), classesHandle.get());
    CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
    CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());

    BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
    BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
    BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
    BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
}
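The m_ScaleX/m_ScaleY/m_ScaleW/m_ScaleH parameters set above are the center-size box-encoding scales familiar from TFLite-style detection post-processing. As a rough sketch of that decoding convention (the names and corner ordering are illustrative, not code from the ArmNN reference workload):

#include <array>
#include <cmath>

// Sketch of center-size box decoding under the usual TFLite-style convention.
// encoding = {ty, tx, th, tw}, anchor = {yCentre, xCentre, height, width}.
std::array<float, 4> DecodeBox(const std::array<float, 4>& encoding,
                               const std::array<float, 4>& anchor,
                               float scaleY, float scaleX, float scaleH, float scaleW)
{
    float yCentre = encoding[0] / scaleY * anchor[2] + anchor[0];
    float xCentre = encoding[1] / scaleX * anchor[3] + anchor[1];
    float halfH   = 0.5f * std::exp(encoding[2] / scaleH) * anchor[2];
    float halfW   = 0.5f * std::exp(encoding[3] / scaleW) * anchor[3];
    return { yCentre - halfH, xCentre - halfW, yCentre + halfH, xCentre + halfW }; // {yMin, xMin, yMax, xMax}
}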

◆ DetectionPostProcessRegularNmsFloatTest()

template<typename FactoryType >
void DetectionPostProcessRegularNmsFloatTest ( )

Definition at line 245 of file DetectionPostProcessTestImpl.hpp.

References armnn::Float32.

{
    return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
        armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
        TestData::s_BoxEncodings,
        TestData::s_Scores,
        TestData::s_Anchors,
        RegularNmsExpectedResults::s_DetectionBoxes,
        RegularNmsExpectedResults::s_DetectionClasses,
        RegularNmsExpectedResults::s_DetectionScores,
        RegularNmsExpectedResults::s_NumDetections,
        true);
}

◆ DetectionPostProcessRegularNmsQuantizedTest()

template<typename FactoryType , armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessRegularNmsQuantizedTest ( )

Definition at line 264 of file DetectionPostProcessTestImpl.hpp.

References anchorsInfo, scoresInfo, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

Referenced by BOOST_AUTO_TEST_CASE().

{
    armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
    armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
    armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);

    boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
    boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
    scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);

    anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
    anchorsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
    QuantizeData<QuantizedType>(boxEncodingsData.data(),
                                TestData::s_BoxEncodings.data(),
                                boxEncodingsInfo);

    std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
    QuantizeData<QuantizedType>(scoresData.data(),
                                TestData::s_Scores.data(),
                                scoresInfo);

    std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
    QuantizeData<QuantizedType>(anchorsData.data(),
                                TestData::s_Anchors.data(),
                                anchorsInfo);

    return DetectionPostProcessImpl<FactoryType, QuantizedType>(
        boxEncodingsInfo,
        scoresInfo,
        anchorsInfo,
        boxEncodingsData,
        scoresData,
        anchorsData,
        RegularNmsExpectedResults::s_DetectionBoxes,
        RegularNmsExpectedResults::s_DetectionClasses,
        RegularNmsExpectedResults::s_DetectionScores,
        RegularNmsExpectedResults::s_NumDetections,
        true);
}

◆ QuantizeData()

template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void QuantizeData ( RawType *  quant,
const float *  dequant,
const armnn::TensorInfo &  info 
)

Definition at line 235 of file DetectionPostProcessTestImpl.hpp.

References TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), and TensorInfo::GetQuantizationScale().

{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<RawType>(
            dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}
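QuantizeData applies armnn::Quantize element-wise, i.e. the affine mapping q = round(value / scale) + offset, clamped to the range of RawType. A small usage sketch, assuming armnn::DataType::QAsymmU8 (so RawType resolves to uint8_t); the scale and offset values are illustrative, not the ones in TestData:

#include <cstdint>
#include <vector>

void QuantizeDataExample()
{
    armnn::TensorInfo info({ 1, 4 }, armnn::DataType::QAsymmU8);
    info.SetQuantizationScale(0.01f);   // q = round(v / 0.01) + 128, clamped to [0, 255]
    info.SetQuantizationOffset(128);

    std::vector<float>   dequant = { -0.5f, 0.0f, 0.25f, 1.0f };
    std::vector<uint8_t> quant(info.GetNumElements());

    QuantizeData<armnn::DataType::QAsymmU8>(quant.data(), dequant.data(), info);
    // quant now holds { 78, 128, 153, 228 }.
}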