author     narpra01 <narumol.prangnawarat@arm.com>        2018-11-16 12:38:41 +0000
committer  Aron Virginas-Tar <aron.virginas-tar@arm.com>   2018-11-16 14:05:35 +0000
commit     fca75c39b1d375b7b3c8f08b6b160aa2e6f260bb (patch)
tree       924ff5f8c49cb8282f7842cac816a869bcb8dd9b
parent     81f2232a7e1145f80aaa2e382bb02c7653a058aa (diff)
download   armnn-fca75c39b1d375b7b3c8f08b6b160aa2e6f260bb.tar.gz
IVGCVSW-2155 - Always call NEConvolutionLayer in NeonConvolution2dWorkload
* Remove check for preferDirectConvolution
* Remove IsNeonDirectConvolutionPreferred
* Remove unused tests and functions

Change-Id: I3f0868f41403ec5fa740889e7bdcb4415ad77bda
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                      34
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                       2
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                   48
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp   30
4 files changed, 7 insertions, 107 deletions
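
With this change, NeonConvolution2dWorkload always configures arm_compute::NEConvolutionLayer, which selects a convolution implementation (GEMM-based, direct, etc.) internally, so the ArmNN-side IsNeonDirectConvolutionPreferred() heuristic becomes redundant. The following is a minimal standalone sketch of driving NEConvolutionLayer directly; the tensor shapes, strides, and padding are illustrative assumptions and are not taken from this commit.

#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>

int main()
{
    using namespace arm_compute;

    // Illustrative FP32 tensors in ACL's (W, H, C, N) shape ordering.
    Tensor input, weights, biases, output;
    input.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32)); // 8 filters of 3x3x3
    biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));

    // Stride 1x1 with 1-pixel padding on each side (keeps the 16x16 spatial size).
    PadStrideInfo padStrideInfo(1, 1, 1, 1);

    // Default-constructed, i.e. no shared memory manager; NEConvolutionLayer
    // picks the underlying convolution method itself.
    NEConvolutionLayer convolutionLayer;
    convolutionLayer.configure(&input, &weights, &biases, &output, padStrideInfo);

    // Allocate backing memory after configure(), fill the tensors, then run.
    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input/weights/biases here ...

    convolutionLayer.run();
    return 0;
}

The main difference from the workload below is that this sketch uses a default-constructed layer, whereas the commit passes ArmNN's memoryManager to the NEConvolutionLayer constructor so intermediate buffers can be shared across workloads.
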
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 165e0677b7..28c4b75f2a 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -453,38 +453,4 @@ bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
output);
}
-bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
-{
- // See arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
- // and complement with NEDirectConvolutionLayerKernel::configure() implementation.
-
- // Only 1x1 is using direct convolution. Performance results and details are in:
- // https://jira.arm.com/browse/IVGCVSW-1003
- // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc
-
- const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32);
-
- // Strides: 1|2|3
- const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) &&
- (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3);
-
- auto paddingLargerThan = [](const Convolution2dDescriptor& conv2ddesc, unsigned int value)
- {
- return conv2ddesc.m_PadLeft > value || conv2ddesc.m_PadRight > value ||
- conv2ddesc.m_PadTop > value || conv2ddesc.m_PadBottom > value;
- };
-
- // Supported sizes and padding.
- const bool sizeAndPaddingSupported =
- // Pad > 0 not supported for 1x1 weights.
- (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u));
-
- const bool preferDirectConvolution = dataTypeSupported &&
- strideSupported &&
- sizeAndPaddingSupported &&
- // NEDirectConvolutionLayerKernel doesn't support NULL bias.
- desc.m_BiasEnabled;
- return preferDirectConvolution;
-}
-
} // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5e80ab8106..fb1567c12d 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -169,6 +169,4 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
}; // class NeonLayerSupport
-bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-
} // namespace armnn
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d4bfa17960..f0410f25c5 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -44,54 +44,6 @@ ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingNhwc,
armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
-namespace
-{
-
-armnn::Convolution2dDescriptor MakeConv2dDesc(uint32_t strideX, uint32_t strideY,
- uint32_t padLeft = 0, uint32_t padRight = 0, uint32_t padTop = 0, uint32_t padBottom = 0)
-{
- armnn::Convolution2dDescriptor result;
- result.m_StrideX = strideX;
- result.m_StrideY = strideY;
- result.m_PadLeft = padLeft;
- result.m_PadRight = padRight;
- result.m_PadTop = padTop;
- result.m_PadBottom = padBottom;
- result.m_BiasEnabled = true;
- return result;
-}
-
-}
-
-BOOST_AUTO_TEST_CASE(Conv2dUtils)
-{
- // The only preferred Neon convolution is 1x1 with padding=0 and stride size {1,2,3}.
- armnn::TensorShape shape1x1({ 1,1,1,1 });
- armnn::TensorInfo info1x1(shape1x1, armnn::DataType::Float32);
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 2)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 3)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 1)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 2)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 3)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 1)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 2)));
- BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 3)));
-
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 1)));
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 5)));
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 6)));
-
- // non zero padding is not preferred for direct convolution
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 0)));
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 0, 1)));
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 1)));
-
- // 2x2 filter not preferred for direct convolution
- armnn::TensorShape shape2x2({ 1,1,2,2 });
- armnn::TensorInfo info2x2(shape2x2, armnn::DataType::Float32);
- BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info2x2, MakeConv2dDesc(1, 1)));
-}
// Depthwise Convolution
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 781c91a8f3..151132f04d 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -82,30 +82,14 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_Data.m_Parameters.m_PadBottom,
arm_compute::DimensionRoundingType::FLOOR);
- const bool preferDirectConvolution =
- IsNeonDirectConvolutionPreferred(m_Data.m_Weight->GetTensorInfo(),
- m_Data.m_Parameters);
+ auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
+ convolutionLayer->configure(&input,
+ m_KernelTensor.get(),
+ m_BiasTensor.get(),
+ &output,
+ padStrideInfo);
+ m_ConvolutionLayer.reset(convolutionLayer.release());
- if (preferDirectConvolution)
- {
- auto directConvolutionLayer = std::make_unique<arm_compute::NEDirectConvolutionLayer>(memoryManager);
- directConvolutionLayer->configure(&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo);
- m_ConvolutionLayer.reset(directConvolutionLayer.release());
- }
- else
- {
- auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
- convolutionLayer->configure(&input,
- m_KernelTensor.get(),
- m_BiasTensor.get(),
- &output,
- padStrideInfo);
- m_ConvolutionLayer.reset(convolutionLayer.release());
- }
BOOST_ASSERT(m_ConvolutionLayer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);