//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/TypesUtils.hpp>

#include <Profiling.hpp>
#include <ResolveType.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

namespace
{

using FloatData = std::vector<float>;
using QuantData = std::pair<float, int32_t>;

struct TestData
{
    static const armnn::TensorShape s_BoxEncodingsShape;
    static const armnn::TensorShape s_ScoresShape;
    static const armnn::TensorShape s_AnchorsShape;

    static const QuantData s_BoxEncodingsQuantData;
    static const QuantData s_ScoresQuantData;
    static const QuantData s_AnchorsQuantData;

    static const FloatData s_BoxEncodings;
    static const FloatData s_Scores;
    static const FloatData s_Anchors;
};

struct RegularNmsExpectedResults
{
    static const FloatData s_DetectionBoxes;
    static const FloatData s_DetectionScores;
    static const FloatData s_DetectionClasses;
    static const FloatData s_NumDetections;
};

struct FastNmsExpectedResults
{
    static const FloatData s_DetectionBoxes;
    static const FloatData s_DetectionScores;
    static const FloatData s_DetectionClasses;
    static const FloatData s_NumDetections;
};

const armnn::TensorShape TestData::s_BoxEncodingsShape = { 1, 6, 4 };
const armnn::TensorShape TestData::s_ScoresShape       = { 1, 6, 3 };
const armnn::TensorShape TestData::s_AnchorsShape      = { 6, 4 };

const QuantData TestData::s_BoxEncodingsQuantData = { 1.00f, 1 };
const QuantData TestData::s_ScoresQuantData       = { 0.01f, 0 };
const QuantData TestData::s_AnchorsQuantData      = { 0.50f, 0 };

const FloatData TestData::s_BoxEncodings =
{
    0.0f,  0.0f, 0.0f, 0.0f,
    0.0f,  1.0f, 0.0f, 0.0f,
    0.0f, -1.0f, 0.0f, 0.0f,
    0.0f,  0.0f, 0.0f, 0.0f,
    0.0f,  1.0f, 0.0f, 0.0f,
    0.0f,  0.0f, 0.0f, 0.0f
};

const FloatData TestData::s_Scores =
{
    0.0f, 0.90f, 0.80f,
    0.0f, 0.75f, 0.72f,
    0.0f, 0.60f, 0.50f,
    0.0f, 0.93f, 0.95f,
    0.0f, 0.50f, 0.40f,
    0.0f, 0.30f, 0.20f
};

const FloatData TestData::s_Anchors =
{
    0.5f,   0.5f, 1.0f, 1.0f,
    0.5f,   0.5f, 1.0f, 1.0f,
    0.5f,   0.5f, 1.0f, 1.0f,
    0.5f,  10.5f, 1.0f, 1.0f,
    0.5f,  10.5f, 1.0f, 1.0f,
    0.5f, 100.5f, 1.0f, 1.0f
};

const FloatData RegularNmsExpectedResults::s_DetectionBoxes =
{
    0.0f, 10.0f, 1.0f, 11.0f,
    0.0f, 10.0f, 1.0f, 11.0f,
    0.0f,  0.0f, 0.0f,  0.0f
};

const FloatData RegularNmsExpectedResults::s_DetectionScores  = { 0.95f, 0.93f, 0.0f };

const FloatData RegularNmsExpectedResults::s_DetectionClasses = { 1.0f, 0.0f, 0.0f };

const FloatData RegularNmsExpectedResults::s_NumDetections    = { 2.0f };

const FloatData FastNmsExpectedResults::s_DetectionBoxes =
{
    0.0f,  10.0f, 1.0f,  11.0f,
    0.0f,   0.0f, 1.0f,   1.0f,
    0.0f, 100.0f, 1.0f, 101.0f
};

const FloatData FastNmsExpectedResults::s_DetectionScores  = { 0.95f, 0.9f, 0.3f };

const FloatData FastNmsExpectedResults::s_DetectionClasses = { 1.0f, 0.0f, 0.0f };

const FloatData FastNmsExpectedResults::s_NumDetections    = { 3.0f };

} // anonymous namespace

template<typename FactoryType,
         armnn::DataType ArmnnType,
         typename T = armnn::ResolveType<ArmnnType>>
void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
                              const armnn::TensorInfo& scoresInfo,
                              const armnn::TensorInfo& anchorsInfo,
                              const std::vector<T>& boxEncodingsData,
                              const std::vector<T>& scoresData,
                              const std::vector<T>& anchorsData,
                              const std::vector<float>& expectedDetectionBoxes,
                              const std::vector<float>& expectedDetectionClasses,
                              const std::vector<float>& expectedDetectionScores,
                              const std::vector<float>& expectedNumDetections,
                              bool useRegularNms)
{
    std::unique_ptr<armnn::Profiler> profiler = std::make_unique<armnn::Profiler>();
    armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());

    auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
    FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);

    auto boxEncodings = MakeTensor<T, 3>(boxEncodingsInfo, boxEncodingsData);
    auto scores       = MakeTensor<T, 3>(scoresInfo, scoresData);
    auto anchors      = MakeTensor<T, 2>(anchorsInfo, anchorsData);

    // The outputs always have a fixed size of up to 3 detections per batch.
    armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32);

    LayerTestResult<float, 3> detectionBoxesResult(detectionBoxesInfo);
    detectionBoxesResult.outputExpected = MakeTensor<float, 3>(detectionBoxesInfo, expectedDetectionBoxes);

    LayerTestResult<float, 2> detectionClassesResult(detectionClassesInfo);
    detectionClassesResult.outputExpected = MakeTensor<float, 2>(detectionClassesInfo, expectedDetectionClasses);

    LayerTestResult<float, 2> detectionScoresResult(detectionScoresInfo);
    detectionScoresResult.outputExpected = MakeTensor<float, 2>(detectionScoresInfo, expectedDetectionScores);

    LayerTestResult<float, 1> numDetectionsResult(numDetectionInfo);
    numDetectionsResult.outputExpected = MakeTensor<float, 1>(numDetectionInfo, expectedNumDetections);

    std::unique_ptr<armnn::ITensorHandle> boxedHandle        = workloadFactory.CreateTensorHandle(boxEncodingsInfo);
    std::unique_ptr<armnn::ITensorHandle> scoresHandle       = workloadFactory.CreateTensorHandle(scoresInfo);
    std::unique_ptr<armnn::ITensorHandle> anchorsHandle      = workloadFactory.CreateTensorHandle(anchorsInfo);
    std::unique_ptr<armnn::ITensorHandle> outputBoxesHandle  = workloadFactory.CreateTensorHandle(detectionBoxesInfo);
    std::unique_ptr<armnn::ITensorHandle> classesHandle      = workloadFactory.CreateTensorHandle(detectionClassesInfo);
    std::unique_ptr<armnn::ITensorHandle> outputScoresHandle = workloadFactory.CreateTensorHandle(detectionScoresInfo);
    std::unique_ptr<armnn::ITensorHandle> numDetectionHandle = workloadFactory.CreateTensorHandle(numDetectionInfo);

    // The anchors are passed to the workload as a constant tensor.
    armnn::ScopedCpuTensorHandle anchorsTensor(anchorsInfo);
    AllocateAndCopyDataToITensorHandle(&anchorsTensor, &anchors[0][0]);

    armnn::DetectionPostProcessQueueDescriptor data;
    data.m_Parameters.m_UseRegularNms          = useRegularNms;
    data.m_Parameters.m_MaxDetections          = 3;
    data.m_Parameters.m_MaxClassesPerDetection = 1;
    data.m_Parameters.m_DetectionsPerClass     = 1;
    data.m_Parameters.m_NmsScoreThreshold      = 0.0;
    data.m_Parameters.m_NmsIouThreshold        = 0.5;
    data.m_Parameters.m_NumClasses             = 2;
    data.m_Parameters.m_ScaleY                 = 10.0;
    data.m_Parameters.m_ScaleX                 = 10.0;
    data.m_Parameters.m_ScaleH                 = 5.0;
    data.m_Parameters.m_ScaleW                 = 5.0;
    data.m_Anchors = &anchorsTensor;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, boxEncodingsInfo, boxedHandle.get());
    AddInputToWorkload(data, info, scoresInfo, scoresHandle.get());
    AddOutputToWorkload(data, info, detectionBoxesInfo, outputBoxesHandle.get());
    AddOutputToWorkload(data, info, detectionClassesInfo, classesHandle.get());
    AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
    AddOutputToWorkload(data, info, numDetectionInfo, numDetectionHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDetectionPostProcess(data, info);

    boxedHandle->Allocate();
    scoresHandle->Allocate();
    outputBoxesHandle->Allocate();
    classesHandle->Allocate();
    outputScoresHandle->Allocate();
    numDetectionHandle->Allocate();

    CopyDataToITensorHandle(boxedHandle.get(), boxEncodings.origin());
    CopyDataToITensorHandle(scoresHandle.get(), scores.origin());

    workload->Execute();

    CopyDataFromITensorHandle(detectionBoxesResult.output.origin(), outputBoxesHandle.get());
    CopyDataFromITensorHandle(detectionClassesResult.output.origin(), classesHandle.get());
    CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
    CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());

    BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
    BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
    BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
    BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
}

template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
void QuantizeData(RawType* quant, const float* dequant, const armnn::TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<RawType>(dequant[i],
                                            info.GetQuantizationScale(),
                                            info.GetQuantizationOffset());
    }
}

template<typename FactoryType>
void DetectionPostProcessRegularNmsFloatTest()
{
    return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
        armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
        TestData::s_BoxEncodings,
        TestData::s_Scores,
        TestData::s_Anchors,
        RegularNmsExpectedResults::s_DetectionBoxes,
        RegularNmsExpectedResults::s_DetectionClasses,
        RegularNmsExpectedResults::s_DetectionScores,
        RegularNmsExpectedResults::s_NumDetections,
        true);
}

template<typename FactoryType,
         armnn::DataType QuantizedType,
         typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessRegularNmsQuantizedTest()
{
    armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
    armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
    armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);

    boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
    boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
    scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);

    anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
    anchorsInfo.SetQuantizationOffset(TestData::s_AnchorsQuantData.second);

    std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
    QuantizeData<QuantizedType>(boxEncodingsData.data(), TestData::s_BoxEncodings.data(), boxEncodingsInfo);

    std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
    QuantizeData<QuantizedType>(scoresData.data(), TestData::s_Scores.data(), scoresInfo);

    std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
    QuantizeData<QuantizedType>(anchorsData.data(), TestData::s_Anchors.data(), anchorsInfo);

    return DetectionPostProcessImpl<FactoryType, QuantizedType>(
        boxEncodingsInfo,
        scoresInfo,
        anchorsInfo,
        boxEncodingsData,
        scoresData,
        anchorsData,
        RegularNmsExpectedResults::s_DetectionBoxes,
        RegularNmsExpectedResults::s_DetectionClasses,
        RegularNmsExpectedResults::s_DetectionScores,
        RegularNmsExpectedResults::s_NumDetections,
        true);
}

template<typename FactoryType>
void DetectionPostProcessFastNmsFloatTest()
{
    return DetectionPostProcessImpl<FactoryType, armnn::DataType::Float32>(
        armnn::TensorInfo(TestData::s_BoxEncodingsShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_ScoresShape, armnn::DataType::Float32),
        armnn::TensorInfo(TestData::s_AnchorsShape, armnn::DataType::Float32),
        TestData::s_BoxEncodings,
        TestData::s_Scores,
        TestData::s_Anchors,
        FastNmsExpectedResults::s_DetectionBoxes,
        FastNmsExpectedResults::s_DetectionClasses,
        FastNmsExpectedResults::s_DetectionScores,
        FastNmsExpectedResults::s_NumDetections,
        false);
}

template<typename FactoryType,
         armnn::DataType QuantizedType,
         typename RawType = armnn::ResolveType<QuantizedType>>
void DetectionPostProcessFastNmsQuantizedTest()
{
    armnn::TensorInfo boxEncodingsInfo(TestData::s_BoxEncodingsShape, QuantizedType);
    armnn::TensorInfo scoresInfo(TestData::s_ScoresShape, QuantizedType);
    armnn::TensorInfo anchorsInfo(TestData::s_AnchorsShape, QuantizedType);

    boxEncodingsInfo.SetQuantizationScale(TestData::s_BoxEncodingsQuantData.first);
    boxEncodingsInfo.SetQuantizationOffset(TestData::s_BoxEncodingsQuantData.second);

    scoresInfo.SetQuantizationScale(TestData::s_ScoresQuantData.first);
    scoresInfo.SetQuantizationOffset(TestData::s_ScoresQuantData.second);

    anchorsInfo.SetQuantizationScale(TestData::s_AnchorsQuantData.first);
    anchorsInfo.SetQuantizationOffset(TestData::s_AnchorsQuantData.second);

    std::vector<RawType> boxEncodingsData(TestData::s_BoxEncodingsShape.GetNumElements());
    QuantizeData<QuantizedType>(boxEncodingsData.data(), TestData::s_BoxEncodings.data(), boxEncodingsInfo);

    std::vector<RawType> scoresData(TestData::s_ScoresShape.GetNumElements());
    QuantizeData<QuantizedType>(scoresData.data(), TestData::s_Scores.data(), scoresInfo);

    std::vector<RawType> anchorsData(TestData::s_AnchorsShape.GetNumElements());
    QuantizeData<QuantizedType>(anchorsData.data(), TestData::s_Anchors.data(), anchorsInfo);

    return DetectionPostProcessImpl<FactoryType, QuantizedType>(
        boxEncodingsInfo,
        scoresInfo,
        anchorsInfo,
        boxEncodingsData,
        scoresData,
        anchorsData,
        FastNmsExpectedResults::s_DetectionBoxes,
        FastNmsExpectedResults::s_DetectionClasses,
        FastNmsExpectedResults::s_DetectionScores,
        FastNmsExpectedResults::s_NumDetections,
        false);
}
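
// Example usage (illustrative sketch only): a backend's layer-test translation unit would
// typically instantiate the helpers above with its own workload factory inside a Boost.Test
// suite. The factory type, data-type enumerator and test-case wiring shown below are
// assumptions based on the reference backend and may differ in the surrounding test framework.
//
//     #include <reference/RefWorkloadFactory.hpp>
//
//     BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
//     {
//         DetectionPostProcessRegularNmsFloatTest<armnn::RefWorkloadFactory>();
//     }
//
//     BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
//     {
//         DetectionPostProcessFastNmsQuantizedTest<armnn::RefWorkloadFactory,
//                                                  armnn::DataType::QuantisedAsymm8>();
//     }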