From fb14ebbd68e04876809145296af96f6f41857418 Mon Sep 17 00:00:00 2001
From: James Ward
Date: Thu, 26 Nov 2020 11:08:12 +0000
Subject: IVGCVSW-5348 Update Doxygen Docu

* Update Doxygen Documentation for 20.11 release

Signed-off-by: James Ward
Change-Id: Ib47edac7923a642a277b1169d1085e5622021dc0
---
 ...tection_post_process_workload_8cpp_source.xhtml | 149 +++++++++++++++++++++
 1 file changed, 149 insertions(+)
 create mode 100644 20.11/_neon_detection_post_process_workload_8cpp_source.xhtml

(limited to '20.11/_neon_detection_post_process_workload_8cpp_source.xhtml')

diff --git a/20.11/_neon_detection_post_process_workload_8cpp_source.xhtml b/20.11/_neon_detection_post_process_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..a4389d796a
--- /dev/null
+++ b/20.11/_neon_detection_post_process_workload_8cpp_source.xhtml
@@ -0,0 +1,149 @@

ArmNN 20.11: src/backends/neon/workloads/NeonDetectionPostProcessWorkload.cpp Source File
NeonDetectionPostProcessWorkload.cpp
+Go to the documentation of this file.
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonDetectionPostProcessWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

arm_compute::DetectionPostProcessLayerInfo MakeInfo(const DetectionPostProcessDescriptor& desc)
{
    return arm_compute::DetectionPostProcessLayerInfo(desc.m_MaxDetections,
                                                      desc.m_MaxClassesPerDetection,
                                                      desc.m_NmsScoreThreshold,
                                                      desc.m_NmsIouThreshold,
                                                      desc.m_NumClasses,
                                                      { desc.m_ScaleX,
                                                        desc.m_ScaleY,
                                                        desc.m_ScaleW,
                                                        desc.m_ScaleH },
                                                      desc.m_UseRegularNms,
                                                      desc.m_DetectionsPerClass);
}

arm_compute::Status NeonDetectionPostProcessValidate(const TensorInfo& boxEncodings,
                                                     const TensorInfo& scores,
                                                     const TensorInfo& anchors,
                                                     const TensorInfo& detectionBoxes,
                                                     const TensorInfo& detectionClasses,
                                                     const TensorInfo& detectionScores,
                                                     const TensorInfo& numDetections,
                                                     const DetectionPostProcessDescriptor& desc)
{
    arm_compute::DetectionPostProcessLayerInfo info = MakeInfo(desc);

    const arm_compute::TensorInfo aclBoxEncodings =
        armcomputetensorutils::BuildArmComputeTensorInfo(boxEncodings);

    const arm_compute::TensorInfo aclScores =
        armcomputetensorutils::BuildArmComputeTensorInfo(scores);

    const arm_compute::TensorInfo aclAnchors =
        armcomputetensorutils::BuildArmComputeTensorInfo(anchors);

    arm_compute::TensorInfo aclDetectionBoxes =
        armcomputetensorutils::BuildArmComputeTensorInfo(detectionBoxes);

    arm_compute::TensorInfo aclDetectionClasses =
        armcomputetensorutils::BuildArmComputeTensorInfo(detectionClasses);

    arm_compute::TensorInfo aclDetectionScores =
        armcomputetensorutils::BuildArmComputeTensorInfo(detectionScores);

    arm_compute::TensorInfo aclNumDetections =
        armcomputetensorutils::BuildArmComputeTensorInfo(numDetections);

    return arm_compute::NEDetectionPostProcessLayer::validate(
            &aclBoxEncodings,
            &aclScores,
            &aclAnchors,
            &aclDetectionBoxes,
            &aclDetectionClasses,
            &aclDetectionScores,
            &aclNumDetections,
            info);
}

NeonDetectionPostProcessWorkload::NeonDetectionPostProcessWorkload(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<DetectionPostProcessQueueDescriptor>(descriptor, info)
{
    m_Anchors = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_Anchors, descriptor.m_Anchors->GetTensorInfo());

    arm_compute::DetectionPostProcessLayerInfo di = MakeInfo(m_Data.m_Parameters);

    auto AclTensorRef = [](ITensorHandle* tensor) -> arm_compute::ITensor&
    {
        return PolymorphicDowncast<IAclTensorHandle*>(tensor)->GetTensor();
    };

    arm_compute::ITensor& boxEncodings = AclTensorRef(m_Data.m_Inputs[0]);
    arm_compute::ITensor& scores       = AclTensorRef(m_Data.m_Inputs[1]);

    arm_compute::ITensor& detectionBoxes   = AclTensorRef(m_Data.m_Outputs[0]);
    arm_compute::ITensor& detectionClasses = AclTensorRef(m_Data.m_Outputs[1]);
    arm_compute::ITensor& detectionScores  = AclTensorRef(m_Data.m_Outputs[2]);
    arm_compute::ITensor& numDetections    = AclTensorRef(m_Data.m_Outputs[3]);

    m_Func.configure(&boxEncodings, &scores, m_Anchors.get(),
                     &detectionBoxes, &detectionClasses, &detectionScores, &numDetections,
                     di);

    InitializeArmComputeTensorData(*m_Anchors, descriptor.m_Anchors);
}

void NeonDetectionPostProcessWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDetectionPostProcessWorkload_Execute");
    m_Func.run();
}

} // namespace armnn
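
For context, the following is a minimal sketch (not part of the patch above) of how a caller might use NeonDetectionPostProcessValidate to check NEON support for this layer before creating the workload. The helper name IsDetectionPostProcessSupportedOnNeon, the descriptor values and the tensor shapes are invented for illustration; the shapes follow the usual TensorFlow Lite DetectionPostProcess layout.

// Illustrative sketch only: helper name, values and shapes are assumptions, not part of ArmNN.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>

#include "NeonDetectionPostProcessWorkload.hpp" // assumes the backend source tree is on the include path

bool IsDetectionPostProcessSupportedOnNeon()
{
    using namespace armnn;

    // Configure the layer the way a parsed TfLite SSD model typically would.
    DetectionPostProcessDescriptor desc;
    desc.m_MaxDetections          = 3;
    desc.m_MaxClassesPerDetection = 1;
    desc.m_DetectionsPerClass     = 1;
    desc.m_NmsScoreThreshold      = 0.0f;
    desc.m_NmsIouThreshold        = 0.5f;
    desc.m_NumClasses             = 2;
    desc.m_UseRegularNms          = false;
    desc.m_ScaleX = desc.m_ScaleY = 10.0f;
    desc.m_ScaleW = desc.m_ScaleH = 5.0f;

    // Inputs: box encodings [1, numBoxes, 4], scores [1, numBoxes, numClasses + 1], anchors [numBoxes, 4].
    TensorInfo boxEncodings({ 1, 6, 4 }, DataType::Float32);
    TensorInfo scores      ({ 1, 6, 3 }, DataType::Float32);
    TensorInfo anchors     ({ 6, 4 },    DataType::Float32);

    // Outputs are sized by m_MaxDetections.
    TensorInfo detectionBoxes  ({ 1, 3, 4 }, DataType::Float32);
    TensorInfo detectionClasses({ 1, 3 },    DataType::Float32);
    TensorInfo detectionScores ({ 1, 3 },    DataType::Float32);
    TensorInfo numDetections   ({ 1 },       DataType::Float32);

    // Delegates to arm_compute::NEDetectionPostProcessLayer::validate, as shown in the listing above.
    arm_compute::Status status = NeonDetectionPostProcessValidate(boxEncodings, scores, anchors,
                                                                  detectionBoxes, detectionClasses,
                                                                  detectionScores, numDetections,
                                                                  desc);
    return status.error_code() == arm_compute::ErrorCode::OK;
}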
Referenced declarations (from the Doxygen cross-references on this page):

arm_compute::DetectionPostProcessLayerInfo MakeInfo(const DetectionPostProcessDescriptor& desc)
arm_compute::Status NeonDetectionPostProcessValidate(const TensorInfo& boxEncodings, const TensorInfo& scores, const TensorInfo& anchors, const TensorInfo& detectionBoxes, const TensorInfo& detectionClasses, const TensorInfo& detectionScores, const TensorInfo& numDetections, const DetectionPostProcessDescriptor& desc)
NeonDetectionPostProcessWorkload(const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info)
virtual void Execute() const override
void InitializeArmComputeTensorData(arm_compute::Tensor& tensor, const ConstCpuTensorHandle* handle)
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name)
const DetectionPostProcessQueueDescriptor m_Data (Definition: Workload.hpp:46)
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
const ConstCpuTensorHandle* m_Anchors
const TensorInfo& GetTensorInfo() const
Status: enumeration (Definition: Types.hpp:26)
WorkloadInfo: contains information about inputs and outputs to a layer.
uint32_t m_MaxDetections: maximum number of detections.
uint32_t m_MaxClassesPerDetection: maximum number of classes per detection, used in Fast NMS.
uint32_t m_DetectionsPerClass: detections per class, used in Regular NMS.
uint32_t m_NumClasses: number of classes.
float m_NmsScoreThreshold: NMS score threshold.
float m_NmsIouThreshold: intersection over union threshold.
bool m_UseRegularNms: use Regular NMS.