author     James Conroy <james.conroy@arm.com>  2020-02-21 16:52:44 +0000
committer  James Conroy <james.conroy@arm.com>  2020-02-21 16:52:44 +0000
commit     d9fb6e2b401b8508b71d7623f4e0fbd171d7c735 (patch)
tree       077995e4e696261a1db02938f02a41d05c65e8a9
parent     337c17f964fd4d40fe638ef960f7f96b61b63947 (diff)
download   armnn-d9fb6e2b401b8508b71d7623f4e0fbd171d7c735.tar.gz
IVGCVSW-4456 Disable NEON DetectionPostProcess
* Reverting to CpuRef workload because it currently has better performance.
* Disabling NEON EndToEnd tests.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Idd5314e46c50581ef4bec24e391779188f20951b
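With NEON support for this layer withdrawn, DetectionPostProcess is assigned to whichever later backend in the preference list still supports it, typically CpuRef. A minimal usage sketch, assuming a hypothetical BuildDetectionNetwork() helper (not part of this commit) that adds a DetectionPostProcess layer to the graph:

#include <armnn/ArmNN.hpp>

#include <utility>
#include <vector>

// Hypothetical helper, assumed to build a graph that contains a
// DetectionPostProcess layer; it is not part of this commit.
armnn::INetworkPtr BuildDetectionNetwork();

int main()
{
    using namespace armnn;

    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());

    INetworkPtr network = BuildDetectionNetwork();

    // CpuAcc (NEON) is preferred, but any layer it reports as unsupported -
    // which now includes DetectionPostProcess - falls back to CpuRef instead.
    std::vector<BackendId> backends = { Compute::CpuAcc, Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}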
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp        24
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp        10
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp       2
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp    8
4 files changed, 5 insertions, 39 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0db97be62c..3c161d553a 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -29,7 +29,6 @@
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
-#include "workloads/NeonDetectionPostProcessWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonInstanceNormalizationWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
@@ -338,29 +337,6 @@ bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
output);
}
-bool NeonLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
- const TensorInfo& scores,
- const TensorInfo& anchors,
- const TensorInfo& detectionBoxes,
- const TensorInfo& detectionClasses,
- const TensorInfo& detectionScores,
- const TensorInfo& numDetections,
- const DetectionPostProcessDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDetectionPostProcessValidate,
- reasonIfUnsupported,
- boxEncodings,
- scores,
- anchors,
- detectionBoxes,
- detectionClasses,
- detectionScores,
- numDetections,
- descriptor);
-}
-
-
bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index d4f005155d..9cb64eac2b 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -92,16 +92,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
- const TensorInfo& scores,
- const TensorInfo& anchors,
- const TensorInfo& detectionBoxes,
- const TensorInfo& detectionClasses,
- const TensorInfo& detectionScores,
- const TensorInfo& numDetections,
- const DetectionPostProcessDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 2639850a06..c3e0dc8cc1 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -210,7 +210,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const Dequantiz
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
- return std::make_unique<NeonDetectionPostProcessWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
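The replacement relies on MakeWorkloadHelper resolving NullWorkload to an empty pointer, so the NEON factory now declines to create a DetectionPostProcess workload at all. A simplified sketch of that pattern, written as an illustration under that assumption rather than the actual helper in src/backends/backendsCommon/MakeWorkloadHelper.hpp:

#include <memory>

namespace sketch
{

struct IWorkload
{
    virtual ~IWorkload() = default;
};

// Selecting NullWorkload means "this backend declines the layer";
// it is intentionally never instantiated.
struct NullWorkload;

// Primary template: construct the requested workload type.
template <typename Workload>
struct MakeWorkloadForType
{
    template <typename Descriptor, typename Info>
    static std::unique_ptr<IWorkload> Make(const Descriptor& descriptor, const Info& info)
    {
        return std::make_unique<Workload>(descriptor, info);
    }
};

// Specialisation: NullWorkload produces no workload at all, so the
// layer is never scheduled on this backend.
template <>
struct MakeWorkloadForType<NullWorkload>
{
    template <typename Descriptor, typename Info>
    static std::unique_ptr<IWorkload> Make(const Descriptor&, const Info&)
    {
        return nullptr;
    }
};

} // namespace sketch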
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 4e9fe0f3c3..abded64915 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -513,7 +513,7 @@ BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
{
std::vector<float> boxEncodings({
0.0f, 0.0f, 0.0f, 0.0f,
@@ -550,7 +550,7 @@ inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo&
}
}
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
{
armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -599,7 +599,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
{
std::vector<float> boxEncodings({
0.0f, 0.0f, 0.0f, 0.0f,
@@ -628,7 +628,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest)
DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
+BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
{
armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);