From 6d302bfb568962f3b6b6f012b260ce54f22d36a0 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 4 Feb 2019 11:46:26 +0000
Subject: IVGCVSW-2559 End to end tests for Detection PostProcess

 * end to end tests for Detection PostProcess float and uint8
 * add anchors to AddDetectionPostProcessLayer
 * add anchors to VisitDetectionPostProcessLayer
 * refactor code

Change-Id: I3c5a9a4a60b74c2246b4a27692bbf3c235163f90
Signed-off-by: Narumol Prangnawarat
---
 src/backends/backendsCommon/WorkloadData.cpp       |   4 +-
 src/backends/backendsCommon/test/CMakeLists.txt    |   1 +
 .../test/DetectionPostProcessTestImpl.hpp          | 162 ++++++++++++++++++++
 .../backendsCommon/test/EndToEndTestImpl.hpp       |   5 +-
 src/backends/reference/RefWorkloadFactory.cpp      |  11 +-
 .../test/RefDetectionPostProcessTests.cpp          |   4 +-
 src/backends/reference/test/RefEndToEndTests.cpp   | 166 +++++++++++++++++++++
 .../reference/workloads/DetectionPostProcess.cpp   |  16 +-
 8 files changed, 355 insertions(+), 14 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index b31d626550..7474b9bc9a 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1101,8 +1101,8 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI
     const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
     const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
     const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
-    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[1];
-    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[2];
+    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
+    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
     const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
 
     ValidateTensorNumDimensions(boxEncodingsInfo, "DetectionPostProcessQueueDescriptor", 3, "box encodings");
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 80a9cfeaa9..4a1d467bb7 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -14,6 +14,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     ConvertFp16ToFp32TestImpl.hpp
     ConvertFp32ToFp16TestImpl.hpp
     DebugTestImpl.hpp
+    DetectionPostProcessTestImpl.hpp
     EndToEndTestImpl.hpp
     FullyConnectedTestImpl.hpp
     GatherTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp
new file mode 100644
index 0000000000..5f4d2a480f
--- /dev/null
+++ b/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp
@@ -0,0 +1,162 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include
+#include
+#include
+
+namespace{
+
+template<typename T>
+armnn::INetworkPtr CreateDetectionPostProcessNetwork(const armnn::TensorInfo& boxEncodingsInfo,
+                                                     const armnn::TensorInfo& scoresInfo,
+                                                     const armnn::TensorInfo& anchorsInfo,
+                                                     const std::vector<T>& anchors,
+                                                     bool useRegularNms)
+{
+    armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
+    armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo detectionClassesInfo({ 1, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo numDetectionInfo({ 1 }, armnn::DataType::Float32);
+
+    armnn::DetectionPostProcessDescriptor desc;
+    desc.m_UseRegularNms = useRegularNms;
+    desc.m_MaxDetections = 3;
+    desc.m_MaxClassesPerDetection = 1;
+    desc.m_DetectionsPerClass = 1;
+    desc.m_NmsScoreThreshold = 0.0;
+    desc.m_NmsIouThreshold = 0.5;
+    desc.m_NumClasses = 2;
+    desc.m_ScaleY = 10.0;
+    desc.m_ScaleX = 10.0;
+    desc.m_ScaleH = 5.0;
+    desc.m_ScaleW = 5.0;
+
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* boxesLayer = net->AddInputLayer(0);
+    armnn::IConnectableLayer* scoresLayer = net->AddInputLayer(1);
+    armnn::ConstTensor anchorsTensor(anchorsInfo, anchors.data());
+    armnn::IConnectableLayer* detectionLayer = net->AddDetectionPostProcessLayer(desc, anchorsTensor,
+                                                                                 "DetectionPostProcess");
+    armnn::IConnectableLayer* detectionBoxesLayer = net->AddOutputLayer(0, "detectionBoxes");
+    armnn::IConnectableLayer* detectionClassesLayer = net->AddOutputLayer(1, "detectionClasses");
+    armnn::IConnectableLayer* detectionScoresLayer = net->AddOutputLayer(2, "detectionScores");
+    armnn::IConnectableLayer* numDetectionLayer = net->AddOutputLayer(3, "numDetection");
+    Connect(boxesLayer, detectionLayer, boxEncodingsInfo, 0, 0);
+    Connect(scoresLayer, detectionLayer, scoresInfo, 0, 1);
+    Connect(detectionLayer, detectionBoxesLayer, detectionBoxesInfo, 0, 0);
+    Connect(detectionLayer, detectionClassesLayer, detectionClassesInfo, 1, 0);
+    Connect(detectionLayer, detectionScoresLayer, detectionScoresInfo, 2, 0);
+    Connect(detectionLayer, numDetectionLayer, numDetectionInfo, 3, 0);
+
+    return net;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DetectionPostProcessEndToEnd(const std::vector<armnn::BackendId>& backends, bool useRegularNms,
+                                  const std::vector<T>& boxEncodings,
+                                  const std::vector<T>& scores,
+                                  const std::vector<T>& anchors,
+                                  const std::vector<float>& expectedDetectionBoxes,
+                                  const std::vector<float>& expectedDetectionClasses,
+                                  const std::vector<float>& expectedDetectionScores,
+                                  const std::vector<float>& expectedNumDetections,
+                                  float boxScale = 1.0f,
+                                  int32_t boxOffset = 0,
+                                  float scoreScale = 1.0f,
+                                  int32_t scoreOffset = 0,
+                                  float anchorScale = 1.0f,
+                                  int32_t anchorOffset = 0)
+{
+    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, ArmnnType);
+    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, ArmnnType);
+    armnn::TensorInfo anchorsInfo({ 6, 4 }, ArmnnType);
+
+    boxEncodingsInfo.SetQuantizationScale(boxScale);
+    boxEncodingsInfo.SetQuantizationOffset(boxOffset);
+    scoresInfo.SetQuantizationScale(scoreScale);
+    scoresInfo.SetQuantizationOffset(scoreOffset);
+    anchorsInfo.SetQuantizationScale(anchorScale);
+    anchorsInfo.SetQuantizationOffset(anchorOffset);
+
+    // Builds up the structure of the network
+    armnn::INetworkPtr net = CreateDetectionPostProcessNetwork(boxEncodingsInfo, scoresInfo,
+                                                               anchorsInfo, anchors, useRegularNms);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, boxEncodings }, { 1, scores }};
+    std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedDetectionBoxes },
+                                                            { 1, expectedDetectionClasses },
+                                                            { 2, expectedDetectionScores },
+                                                            { 3, expectedNumDetections }};
+
+    EndToEndLayerTestImpl(
+        move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DetectionPostProcessRegularNmsEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                            const std::vector<T>& boxEncodings,
+                                            const std::vector<T>& scores,
+                                            const std::vector<T>& anchors,
+                                            float boxScale = 1.0f,
+                                            int32_t boxOffset = 0,
+                                            float scoreScale = 1.0f,
+                                            int32_t scoreOffset = 0,
+                                            float anchorScale = 1.0f,
+                                            int32_t anchorOffset = 0)
+{
+    std::vector<float> expectedDetectionBoxes({
+        0.0f, 10.0f, 1.0f, 11.0f,
+        0.0f, 10.0f, 1.0f, 11.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    });
+    std::vector<float> expectedDetectionScores({ 0.95f, 0.93f, 0.0f });
+    std::vector<float> expectedDetectionClasses({ 1.0f, 0.0f, 0.0f });
+    std::vector<float> expectedNumDetections({ 2.0f });
+
+    DetectionPostProcessEndToEnd<ArmnnType>(backends, true, boxEncodings, scores, anchors,
+                                            expectedDetectionBoxes, expectedDetectionClasses,
+                                            expectedDetectionScores, expectedNumDetections,
+                                            boxScale, boxOffset, scoreScale, scoreOffset,
+                                            anchorScale, anchorOffset);
+
+};
+
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void DetectionPostProcessFastNmsEndToEnd(const std::vector<armnn::BackendId>& backends,
+                                         const std::vector<T>& boxEncodings,
+                                         const std::vector<T>& scores,
+                                         const std::vector<T>& anchors,
+                                         float boxScale = 1.0f,
+                                         int32_t boxOffset = 0,
+                                         float scoreScale = 1.0f,
+                                         int32_t scoreOffset = 0,
+                                         float anchorScale = 1.0f,
+                                         int32_t anchorOffset = 0)
+{
+    std::vector<float> expectedDetectionBoxes({
+        0.0f, 10.0f, 1.0f, 11.0f,
+        0.0f, 0.0f, 1.0f, 1.0f,
+        0.0f, 100.0f, 1.0f, 101.0f
+    });
+    std::vector<float> expectedDetectionScores({ 0.95f, 0.9f, 0.3f });
+    std::vector<float> expectedDetectionClasses({ 1.0f, 0.0f, 0.0f });
+    std::vector<float> expectedNumDetections({ 3.0f });
+
+    DetectionPostProcessEndToEnd<ArmnnType>(backends, false, boxEncodings, scores, anchors,
+                                            expectedDetectionBoxes, expectedDetectionClasses,
+                                            expectedDetectionScores, expectedNumDetections,
+                                            boxScale, boxOffset, scoreScale, scoreOffset,
+                                            anchorScale, anchorOffset);
+
+};
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index d17b61e8fb..a04fdf72e7 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -163,7 +163,10 @@ void EndToEndLayerTestImpl(INetworkPtr network,
         }
         else
         {
-            BOOST_TEST(it.second == out);
+            for (unsigned int i = 0; i < out.size(); ++i)
+            {
+                BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
+            }
         }
     }
 }
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 9c1ce1e013..3bf83bd9be 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -154,7 +154,16 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
     const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
 {
-    return MakeWorkload(descriptor, info);
+    const DataType dataType = info.m_InputTensorInfos[0].GetDataType();
+    switch (dataType)
+    {
+        case DataType::Float32:
+            return std::make_unique<RefDetectionPostProcessFloat32Workload>(descriptor, info);
+        case DataType::QuantisedAsymm8:
+            return std::make_unique<RefDetectionPostProcessUint8Workload>(descriptor, info);
+        default:
+            return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateNormalization(
diff --git a/src/backends/reference/test/RefDetectionPostProcessTests.cpp b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
index 39403f0284..a9faff70b1 100644
--- a/src/backends/reference/test/RefDetectionPostProcessTests.cpp
+++ b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
@@ -74,8 +74,8 @@ void DetectionPostProcessTestImpl(bool useRegularNms, const std::vector<float>&
                                   const std::vector<float>& expectedNumDetections)
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo scoresInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo anchorsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
 
     armnn::TensorInfo detectionBoxesInfo({ 1, 3, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo detectionScoresInfo({ 1, 3 }, armnn::DataType::Float32);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 802167a3a0..c89e586f03 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -4,6 +4,8 @@
 //
 
 #include
+
+#include <backendsCommon/test/DetectionPostProcessTestImpl.hpp>
 #include
 #include
 #include
@@ -453,4 +455,168 @@ BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
 {
     GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
+BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
+{
+    std::vector<float> boxEncodings({
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    });
+    std::vector<float> scores({
+        0.0f, 0.9f, 0.8f,
+        0.0f, 0.75f, 0.72f,
+        0.0f, 0.6f, 0.5f,
+        0.0f, 0.93f, 0.95f,
+        0.0f, 0.5f, 0.4f,
+        0.0f, 0.3f, 0.2f
+    });
+    std::vector<float> anchors({
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 100.5f, 1.0f, 1.0f
+    });
+    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
+}
+
+inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
+{
+    for (size_t i = 0; i < info.GetNumElements(); i++)
+    {
+        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
+    }
+}
+
+BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
+{
+    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+
+    boxEncodingsInfo.SetQuantizationScale(1.0f);
+    boxEncodingsInfo.SetQuantizationOffset(1);
+    scoresInfo.SetQuantizationScale(0.01f);
+    scoresInfo.SetQuantizationOffset(0);
+    anchorsInfo.SetQuantizationScale(0.5f);
+    anchorsInfo.SetQuantizationOffset(0);
+
+    std::vector<float> boxEncodings({
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    });
+    std::vector<float> scores({
+        0.0f, 0.9f, 0.8f,
+        0.0f, 0.75f, 0.72f,
+        0.0f, 0.6f, 0.5f,
+        0.0f, 0.93f, 0.95f,
+        0.0f, 0.5f, 0.4f,
+        0.0f, 0.3f, 0.2f
+    });
+    std::vector<float> anchors({
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 100.5f, 1.0f, 1.0f
+    });
+
+    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+    std::vector<uint8_t> qScores(scores.size(), 0);
+    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+                                                                             qScores, qAnchors,
+                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
+}
+
+BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest)
+{
+    std::vector<float> boxEncodings({
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    });
+    std::vector<float> scores({
+        0.0f, 0.9f, 0.8f,
+        0.0f, 0.75f, 0.72f,
+        0.0f, 0.6f, 0.5f,
+        0.0f, 0.93f, 0.95f,
+        0.0f, 0.5f, 0.4f,
+        0.0f, 0.3f, 0.2f
+    });
+    std::vector<float> anchors({
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 100.5f, 1.0f, 1.0f
+    });
+    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
+}
+
+BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
+{
+    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+
+    boxEncodingsInfo.SetQuantizationScale(1.0f);
+    boxEncodingsInfo.SetQuantizationOffset(1);
+    scoresInfo.SetQuantizationScale(0.01f);
+    scoresInfo.SetQuantizationOffset(0);
+    anchorsInfo.SetQuantizationScale(0.5f);
+    anchorsInfo.SetQuantizationOffset(0);
+
+    std::vector<float> boxEncodings({
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, -1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    });
+    std::vector<float> scores({
+        0.0f, 0.9f, 0.8f,
+        0.0f, 0.75f, 0.72f,
+        0.0f, 0.6f, 0.5f,
+        0.0f, 0.93f, 0.95f,
+        0.0f, 0.5f, 0.4f,
+        0.0f, 0.3f, 0.2f
+    });
+    std::vector<float> anchors({
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 0.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 10.5f, 1.0f, 1.0f,
+        0.5f, 100.5f, 1.0f, 1.0f
+    });
+
+    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+    std::vector<uint8_t> qScores(scores.size(), 0);
+    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+                                                                          qScores, qAnchors,
+                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 958de8294b..2eb35f5ffa 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -105,15 +105,15 @@ void AllocateOutputData(unsigned int numOutput, unsigned int numSelected, const
     for (unsigned int i = 0; i < numOutput; ++i)
     {
         unsigned int boxIndex = i * 4;
-        unsigned int boxConorIndex = selectedBoxes[outputIndices[i]] * 4;
         if (i < numSelected)
         {
+            unsigned int boxCornorIndex = selectedBoxes[outputIndices[i]] * 4;
             detectionScores[i] = selectedScores[outputIndices[i]];
             detectionClasses[i] = boost::numeric_cast<float>(selectedClasses[outputIndices[i]]);
-            detectionBoxes[boxIndex] = boxCorners[boxConorIndex];
-            detectionBoxes[boxIndex + 1] = boxCorners[boxConorIndex + 1];
-            detectionBoxes[boxIndex + 2] = boxCorners[boxConorIndex + 2];
-            detectionBoxes[boxIndex + 3] = boxCorners[boxConorIndex + 3];
+            detectionBoxes[boxIndex] = boxCorners[boxCornorIndex];
+            detectionBoxes[boxIndex + 1] = boxCorners[boxCornorIndex + 1];
+            detectionBoxes[boxIndex + 2] = boxCorners[boxCornorIndex + 2];
+            detectionBoxes[boxIndex + 3] = boxCorners[boxCornorIndex + 3];
         }
         else
         {
@@ -125,7 +125,7 @@ void AllocateOutputData(unsigned int numOutput, unsigned int numSelected, const
             detectionBoxes[boxIndex + 3] = 0.0f;
         }
     }
-    numDetections[0] = boost::numeric_cast<float>(numOutput);
+    numDetections[0] = boost::numeric_cast<float>(numSelected);
 }
 
 } // anonymous namespace
@@ -216,7 +216,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
         std::vector<unsigned int> outputIndices = GenerateRangeK(numSelected);
         TopKSort(numOutput, outputIndices.data(), selectedScoresAfterNms.data(), numSelected);
 
-        AllocateOutputData(numOutput, numSelected, boxCorners, outputIndices,
+        AllocateOutputData(detectionBoxesInfo.GetShape()[1], numOutput, boxCorners, outputIndices,
                            selectedBoxesAfterNms, selectedClasses, selectedScoresAfterNms,
                            detectionBoxes, detectionScores, detectionClasses, numDetections);
     }
@@ -255,7 +255,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
         unsigned int numSelected = boost::numeric_cast<unsigned int>(selectedIndices.size());
         unsigned int numOutput = std::min(desc.m_MaxDetections, numSelected);
 
-        AllocateOutputData(numOutput, numSelected, boxCorners, selectedIndices,
+        AllocateOutputData(detectionBoxesInfo.GetShape()[1], numOutput, boxCorners, selectedIndices,
                            boxIndices, maxScoreClasses, maxScores,
                            detectionBoxes, detectionScores, detectionClasses, numDetections);
     }
--
cgit v1.2.1
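
Editor's note: the sketch below is illustrative and not part of the patch. It shows how a caller outside the test harness might build the same DetectionPostProcess network through the public INetwork API, with the anchors supplied as a ConstTensor at layer-creation time as this change requires. The umbrella header armnn/ArmNN.hpp, the helper name BuildDetectionPostProcessNetwork, and the explicit slot wiring (in place of the tests' Connect() helper) are assumptions for illustration; the shapes and descriptor values are copied from the tests above.

// Illustrative sketch only: build a DetectionPostProcess network with anchors
// passed as constant data, mirroring CreateDetectionPostProcessNetwork() above.
#include <armnn/ArmNN.hpp>

#include <vector>

armnn::INetworkPtr BuildDetectionPostProcessNetwork(const std::vector<float>& anchors) // hypothetical helper
{
    using namespace armnn;

    // Input shapes used by the end-to-end tests: 6 boxes, 3 classes per box.
    TensorInfo boxEncodingsInfo({ 1, 6, 4 }, DataType::Float32);
    TensorInfo scoresInfo({ 1, 6, 3 }, DataType::Float32);
    TensorInfo anchorsInfo({ 6, 4 }, DataType::Float32);

    // Same descriptor values as the tests.
    DetectionPostProcessDescriptor desc;
    desc.m_UseRegularNms = true;
    desc.m_MaxDetections = 3;
    desc.m_MaxClassesPerDetection = 1;
    desc.m_DetectionsPerClass = 1;
    desc.m_NmsScoreThreshold = 0.0f;
    desc.m_NmsIouThreshold = 0.5f;
    desc.m_NumClasses = 2;
    desc.m_ScaleY = 10.0f;
    desc.m_ScaleX = 10.0f;
    desc.m_ScaleH = 5.0f;
    desc.m_ScaleW = 5.0f;

    INetworkPtr net = INetwork::Create();
    IConnectableLayer* boxesLayer  = net->AddInputLayer(0);
    IConnectableLayer* scoresLayer = net->AddInputLayer(1);

    // Anchors are constant data owned by the layer, not a third network input.
    ConstTensor anchorsTensor(anchorsInfo, anchors.data());
    IConnectableLayer* detectionLayer =
        net->AddDetectionPostProcessLayer(desc, anchorsTensor, "DetectionPostProcess");

    // Four outputs: boxes, classes, scores and number of detections (all Float32).
    IConnectableLayer* boxesOut   = net->AddOutputLayer(0, "detectionBoxes");
    IConnectableLayer* classesOut = net->AddOutputLayer(1, "detectionClasses");
    IConnectableLayer* scoresOut  = net->AddOutputLayer(2, "detectionScores");
    IConnectableLayer* numOut     = net->AddOutputLayer(3, "numDetections");

    // Connect inputs to the layer and set the tensor metadata on every output slot.
    boxesLayer->GetOutputSlot(0).SetTensorInfo(boxEncodingsInfo);
    boxesLayer->GetOutputSlot(0).Connect(detectionLayer->GetInputSlot(0));
    scoresLayer->GetOutputSlot(0).SetTensorInfo(scoresInfo);
    scoresLayer->GetOutputSlot(0).Connect(detectionLayer->GetInputSlot(1));

    detectionLayer->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 3, 4 }, DataType::Float32));
    detectionLayer->GetOutputSlot(0).Connect(boxesOut->GetInputSlot(0));
    detectionLayer->GetOutputSlot(1).SetTensorInfo(TensorInfo({ 1, 3 }, DataType::Float32));
    detectionLayer->GetOutputSlot(1).Connect(classesOut->GetInputSlot(0));
    detectionLayer->GetOutputSlot(2).SetTensorInfo(TensorInfo({ 1, 3 }, DataType::Float32));
    detectionLayer->GetOutputSlot(2).Connect(scoresOut->GetInputSlot(0));
    detectionLayer->GetOutputSlot(3).SetTensorInfo(TensorInfo({ 1 }, DataType::Float32));
    detectionLayer->GetOutputSlot(3).Connect(numOut->GetInputSlot(0));

    return net;
}

Because the anchors are bound to the layer as constant data (validated through m_Anchors in DetectionPostProcessQueueDescriptor::Validate above), the network exposes only two runtime inputs, box encodings and scores, which is why the end-to-end tests feed exactly two input tensors.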