23.05
|
Go to the documentation of this file.
34 auto layer = CloneBase<DetectionPostProcessLayer>(graph,
m_Param,
GetName());
36 return std::move(layer);
67 "DetectionPostProcessLayer", 1);
72 "DetectionPostProcessLayer", 2);
77 "DetectionPostProcessLayer", 3);
84 std::vector<TensorShape> results;
85 results.push_back({ 1, detectedBoxes, 4 });
86 results.push_back({ 1, detectedBoxes });
87 results.push_back({ 1, detectedBoxes });
88 results.push_back({ 1 });
101 std::vector<armnn::ConstTensor> constTensors { {managedAnchors.GetTensorInfo(), managedAnchors.Map()} };
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values stored by the layer.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
uint32_t m_MaxDetections
Maximum number of detections.
const void * Map(bool blocking=true)
RAII-managed resource that unmaps the MemoryArea once it goes out of scope.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const ConstTensorHandle * m_Anchors
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
const TensorInfo & GetTensorInfo() const
This layer represents a detection postprocess operator.
Copyright (c) 2021 ARM Limited and Contributors.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the DetectionPostProcess type.
const TensorInfo & GetTensorInfo() const override
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
The model does not specify the output shapes.
std::shared_ptr< ConstTensorHandle > m_Anchors
A unique pointer to store Anchor values.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
DetectionPostProcessLayer(const DetectionPostProcessDescriptor &param, const char *name)
Constructor to create a DetectionPostProcessLayer.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
void DetectionPostProcess(const TensorInfo &boxEncodingsInfo, const TensorInfo &scoresInfo, const TensorInfo &anchorsInfo, const TensorInfo &detectionBoxesInfo, const TensorInfo &detectionClassesInfo, const TensorInfo &detectionScoresInfo, const TensorInfo &numDetectionsInfo, const DetectionPostProcessDescriptor &desc, Decoder< float > &boxEncodings, Decoder< float > &scores, Decoder< float > &anchors, float *detectionBoxes, float *detectionClasses, float *detectionScores, float *numDetections)
const TensorShape & GetShape() const
DetectionPostProcessLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
#define ARMNN_ASSERT_MSG(COND, MSG)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
#define ARMNN_ASSERT(COND)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
uint32_t m_MaxClassesPerDetection
Maximum number of classes per detection, used in Fast NMS.
const char * GetName() const override
Returns the name of the layer.
const DetectionPostProcessDescriptor & GetParameters() const override
DetectionPostProcessDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of DetectionPostProcessLayer.