18 explicit DetectionPostProcessFixture(
const std::string& custom_options)
39 "builtin_code": "CUSTOM", 40 "custom_code": "TFLite_Detection_PostProcess" 47 "name": "box_encodings", 82 "name": "detection_boxes", 88 "name": "detection_classes", 94 "name": "detection_scores", 100 "name": "num_detections", 105 "outputs": [3, 4, 5, 6], 109 "outputs": [3, 4, 5, 6], 110 "builtin_options_type": 0, 111 "custom_options": [)" + custom_options + R"(], 112 "custom_options_format": "FLEXBUFFERS" 117 { "data": [ 1, 1, 2, 2, 133 struct ParseDetectionPostProcessCustomOptions : DetectionPostProcessFixture
155 ParseDetectionPostProcessCustomOptions()
156 : DetectionPostProcessFixture(
157 GenerateDetectionPostProcessJsonString(GenerateDescriptor()))
161 TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions,
"ParseDetectionPostProcess")
166 using UnquantizedContainer = std::vector<float>;
167 UnquantizedContainer boxEncodings =
169 0.0f, 0.0f, 0.0f, 0.0f,
170 0.0f, 1.0f, 0.0f, 0.0f,
171 0.0f, -1.0f, 0.0f, 0.0f,
172 0.0f, 0.0f, 0.0f, 0.0f,
173 0.0f, 1.0f, 0.0f, 0.0f,
174 0.0f, 0.0f, 0.0f, 0.0f
177 UnquantizedContainer scores =
188 UnquantizedContainer detectionBoxes =
190 0.0f, 10.0f, 1.0f, 11.0f,
191 0.0f, 10.0f, 1.0f, 11.0f,
192 0.0f, 0.0f, 0.0f, 0.0f
195 UnquantizedContainer detectionClasses = { 1.0f, 0.0f, 0.0f };
196 UnquantizedContainer detectionScores = { 0.95f, 0.93f, 0.0f };
198 UnquantizedContainer numDetections = { 2.0f };
201 using QuantizedContainer = std::vector<uint8_t>;
203 QuantizedContainer quantBoxEncodings = armnnUtils::QuantizedVector<uint8_t>(boxEncodings, 1.00f, 1);
204 QuantizedContainer quantScores = armnnUtils::QuantizedVector<uint8_t>(scores, 0.01f, 0);
206 std::map<std::string, QuantizedContainer> input =
208 {
"box_encodings", quantBoxEncodings },
209 {
"scores", quantScores }
212 std::map<std::string, UnquantizedContainer> output =
214 {
"detection_boxes", detectionBoxes},
215 {
"detection_classes", detectionClasses},
216 {
"detection_scores", detectionScores},
217 {
"num_detections", numDetections}
220 RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
223 TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions,
"DetectionPostProcessGraphStructureTest")
235 ReadStringToBinary();
244 CHECK((graph.GetNumInputs() == 2));
245 CHECK((graph.GetNumOutputs() == 4));
246 CHECK((graph.GetNumLayers() == 7));
296 CHECK(
IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
297 CHECK(
IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
298 CHECK(
IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
299 CHECK(
IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
300 CHECK(
IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
301 CHECK(
IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
float m_ScaleW
Center size encoding scale width.
CPU Execution: Reference C++ kernels.
armnn::Layer * GetFirstLayerWithName(armnn::Graph &graph, const std::string &name)
float m_ScaleX
Center size encoding scale x.
uint32_t m_DetectionsPerClass
Detections per class, used in Regular NMS.
uint32_t m_MaxClassesPerDetection
Maximum number of classes per detection, used in Fast NMS.
uint32_t m_MaxDetections
Maximum number of detections.
TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpu")
float m_NmsIouThreshold
Intersection over union threshold.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
uint32_t m_NumClasses
Number of classes.
bool m_UseRegularNms
Use Regular NMS.
bool IsConnected(armnn::Layer *srcLayer, armnn::Layer *destLayer, unsigned int srcSlot, unsigned int destSlot, const armnn::TensorInfo &expectedTensorInfo)
float m_ScaleH
Center size encoding scale height.
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
bool CheckNumberOfInputSlot(armnn::Layer *layer, unsigned int num)
bool CheckNumberOfOutputSlot(armnn::Layer *layer, unsigned int num)
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr