ArmNN 22.05
RefDetectionPostProcessWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefDetectionPostProcessWorkload.hpp"

#include "Decoders.hpp"
#include "DetectionPostProcess.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"

namespace armnn
{

RefDetectionPostProcessWorkload::RefDetectionPostProcessWorkload(
        const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info)
        : RefBaseWorkload<DetectionPostProcessQueueDescriptor>(descriptor, info),
          m_Anchors(std::make_unique<ScopedTensorHandle>(*(descriptor.m_Anchors))) {}

void RefDetectionPostProcessWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefDetectionPostProcessWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

void RefDetectionPostProcessWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                              std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDetectionPostProcessWorkload_Execute");

    const TensorInfo& boxEncodingsInfo = GetTensorInfo(inputs[0]);
    const TensorInfo& scoresInfo = GetTensorInfo(inputs[1]);
    const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo = GetTensorInfo(outputs[0]);
    const TensorInfo& detectionClassesInfo = GetTensorInfo(outputs[1]);
    const TensorInfo& detectionScoresInfo = GetTensorInfo(outputs[2]);
    const TensorInfo& numDetectionsInfo = GetTensorInfo(outputs[3]);

    auto boxEncodings = MakeDecoder<float>(boxEncodingsInfo, inputs[0]->Map());
    auto scores = MakeDecoder<float>(scoresInfo, inputs[1]->Map());
    auto anchors = MakeDecoder<float>(anchorsInfo, m_Anchors->Map(false));

    float* detectionBoxes = reinterpret_cast<float*>(outputs[0]->Map());
    float* detectionClasses = reinterpret_cast<float*>(outputs[1]->Map());
    float* detectionScores = reinterpret_cast<float*>(outputs[2]->Map());
    float* numDetections = reinterpret_cast<float*>(outputs[3]->Map());

    DetectionPostProcess(boxEncodingsInfo, scoresInfo, anchorsInfo,
                         detectionBoxesInfo, detectionClassesInfo,
                         detectionScoresInfo, numDetectionsInfo, m_Data.m_Parameters,
                         *boxEncodings, *scores, *anchors, detectionBoxes,
                         detectionClasses, detectionScores, numDetections);
}

} //namespace armnn
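
Note on reading the results: the workload decodes the (possibly quantized) box encodings, scores and anchors into float, then writes the four mapped output buffers as plain float data. The sketch below is not part of the ArmNN source; it only illustrates how a caller might interpret those buffers after the workload has run, assuming the usual TensorFlow Lite detection post-process convention (boxes stored as [y1, x1, y2, x2] per detection and a single-element numDetections buffer). Shapes and sample values are made up for illustration.

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical contents of the four output buffers after Execute(),
    // for a configuration with maxDetections == 3 and 2 valid detections.
    const std::size_t maxDetections = 3;
    std::vector<float> detectionBoxes   = { 0.10f, 0.10f, 0.40f, 0.40f,
                                            0.50f, 0.50f, 0.90f, 0.90f,
                                            0.00f, 0.00f, 0.00f, 0.00f };
    std::vector<float> detectionClasses = { 1.0f, 0.0f, 0.0f };
    std::vector<float> detectionScores  = { 0.95f, 0.80f, 0.0f };
    std::vector<float> numDetections    = { 2.0f };

    // Only the first numDetections entries are meaningful; the rest is padding.
    const auto valid = static_cast<std::size_t>(numDetections[0]);
    for (std::size_t i = 0; i < valid && i < maxDetections; ++i)
    {
        std::printf("detection %zu: class=%.0f score=%.2f box=[%.2f %.2f %.2f %.2f]\n",
                    i, detectionClasses[i], detectionScores[i],
                    detectionBoxes[4 * i + 0], detectionBoxes[4 * i + 1],
                    detectionBoxes[4 * i + 2], detectionBoxes[4 * i + 3]);
    }
    return 0;
}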