From d9fb6e2b401b8508b71d7623f4e0fbd171d7c735 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Fri, 21 Feb 2020 16:52:44 +0000
Subject: IVGCVSW-4456 Disable NEON PostDetectionProcess

* Reverting to CpuRef workload because it currently has better performance.
* Disabling NEON EndToEnd tests.

Signed-off-by: James Conroy
Change-Id: Idd5314e46c50581ef4bec24e391779188f20951b
---
 src/backends/neon/NeonLayerSupport.cpp       | 24 ------------------------
 src/backends/neon/NeonLayerSupport.hpp       | 10 ----------
 src/backends/neon/NeonWorkloadFactory.cpp    |  2 +-
 src/backends/neon/test/NeonEndToEndTests.cpp |  8 ++++----
 4 files changed, 5 insertions(+), 39 deletions(-)

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0db97be62c..3c161d553a 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -29,7 +29,6 @@
 #include "workloads/NeonDepthToSpaceWorkload.hpp"
 #include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
 #include "workloads/NeonDequantizeWorkload.hpp"
-#include "workloads/NeonDetectionPostProcessWorkload.hpp"
 #include "workloads/NeonGreaterWorkload.hpp"
 #include "workloads/NeonInstanceNormalizationWorkload.hpp"
 #include "workloads/NeonL2NormalizationFloatWorkload.hpp"
@@ -338,29 +337,6 @@ bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                    output);
 }
 
-bool NeonLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
-                                                       const TensorInfo& scores,
-                                                       const TensorInfo& anchors,
-                                                       const TensorInfo& detectionBoxes,
-                                                       const TensorInfo& detectionClasses,
-                                                       const TensorInfo& detectionScores,
-                                                       const TensorInfo& numDetections,
-                                                       const DetectionPostProcessDescriptor& descriptor,
-                                                       Optional<std::string&> reasonIfUnsupported) const
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDetectionPostProcessValidate,
-                                   reasonIfUnsupported,
-                                   boxEncodings,
-                                   scores,
-                                   anchors,
-                                   detectionBoxes,
-                                   detectionClasses,
-                                   detectionScores,
-                                   numDetections,
-                                   descriptor);
-}
-
-
 bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                               const TensorInfo& output,
                                                               const DepthwiseConvolution2dDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index d4f005155d..9cb64eac2b 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -92,16 +92,6 @@ public:
                                const TensorInfo& output,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
-                                         const TensorInfo& scores,
-                                         const TensorInfo& anchors,
-                                         const TensorInfo& detectionBoxes,
-                                         const TensorInfo& detectionClasses,
-                                         const TensorInfo& detectionScores,
-                                         const TensorInfo& numDetections,
-                                         const DetectionPostProcessDescriptor& descriptor,
-                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthwiseConvolution2dDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 2639850a06..c3e0dc8cc1 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -210,7 +210,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const Dequantiz
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
     const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
 {
-    return std::make_unique<NeonDetectionPostProcessWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDivision(
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 4e9fe0f3c3..abded64915 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -513,7 +513,7 @@ BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
     ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -550,7 +550,7 @@ inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo&
     }
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -599,7 +599,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
                                                 1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -628,7 +628,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
     DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
-- 
cgit v1.2.1