author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2020-07-27 15:52:13 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2020-07-27 15:52:13 +0100
commit     1a26896fd8d48205393ba0f22db864b5302b703f (patch)
tree       f5bdc6414951f10c88b6375cec694685b0682c8c
parent     11c4efc983cacfae6b1442d18a1744e114a82e64 (diff)
download   armnn-1a26896fd8d48205393ba0f22db864b5302b703f.tar.gz
IVGCVSW-5011 Implement GetCapabilities in NeonTensorHandleFactory
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I43591ec1250c1d84d286de85956a86eb5e2abc2a
-rw-r--r--   src/backends/neon/NeonTensorHandleFactory.cpp     21
-rw-r--r--   src/backends/neon/NeonTensorHandleFactory.hpp     28
-rw-r--r--   src/backends/neon/test/CMakeLists.txt              1
-rw-r--r--   src/backends/neon/test/NeonTensorHandleTests.cpp   80
4 files changed, 130 insertions, 0 deletions
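In short, the patch adds a GetCapabilities override to NeonTensorHandleFactory: for layer types listed in the new paddingRequiredLayers set it reports a single PaddingRequired capability, and for everything else it returns an empty vector. A caller could consume that query roughly as follows. This is a minimal sketch, not part of the patch; the helper name RequiresPaddedHandles is hypothetical, and only the GetCapabilities signature and the Capability/CapabilityClass names come from the diffs below.

// Minimal sketch, not part of this patch. RequiresPaddedHandles is a hypothetical
// helper; it relies only on the GetCapabilities signature and the Capability members
// (m_CapabilityClass, m_Value) exercised by the new unit tests further down.
#include <neon/NeonTensorHandleFactory.hpp>

#include <vector>

namespace
{

bool RequiresPaddedHandles(armnn::NeonTensorHandleFactory& factory,
                           const armnn::IConnectableLayer* layer,
                           const armnn::IConnectableLayer* connectedLayer)
{
    using namespace armnn;

    // Ask the Neon factory whether this layer type needs padded tensor handles.
    std::vector<Capability> capabilities =
        factory.GetCapabilities(layer, connectedLayer, CapabilityClass::PaddingRequired);

    // An empty result means the layer type is not in paddingRequiredLayers,
    // so no padding capability applies.
    for (const Capability& capability : capabilities)
    {
        if (capability.m_CapabilityClass == CapabilityClass::PaddingRequired && capability.m_Value)
        {
            return true;
        }
    }
    return false;
}

} // anonymous namespace
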
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index a8b5b81412..ec9e0631fe 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -6,6 +6,8 @@
#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"
+#include "Layer.hpp"
+
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
@@ -108,4 +110,23 @@ MemorySourceFlags NeonTensorHandleFactory::GetImportFlags() const
    return 0;
}
+std::vector<Capability> NeonTensorHandleFactory::GetCapabilities(const IConnectableLayer* layer,
+                                                                 const IConnectableLayer* connectedLayer,
+                                                                 CapabilityClass capabilityClass)
+
+{
+    IgnoreUnused(connectedLayer);
+    std::vector<Capability> capabilities;
+    if (capabilityClass == CapabilityClass::PaddingRequired)
+    {
+        auto search = paddingRequiredLayers.find((PolymorphicDowncast<const Layer*>(layer))->GetType());
+        if ( search != paddingRequiredLayers.end())
+        {
+            Capability paddingCapability(CapabilityClass::PaddingRequired, true);
+            capabilities.push_back(paddingCapability);
+        }
+    }
+    return capabilities;
+}
+
} // namespace armnn
diff --git a/src/backends/neon/NeonTensorHandleFactory.hpp b/src/backends/neon/NeonTensorHandleFactory.hpp
index e1cdc8bbac..0930d4e8d7 100644
--- a/src/backends/neon/NeonTensorHandleFactory.hpp
+++ b/src/backends/neon/NeonTensorHandleFactory.hpp
@@ -13,6 +13,30 @@ namespace armnn
constexpr const char* NeonTensorHandleFactoryId() { return "Arm/Neon/TensorHandleFactory"; }
+const std::set<armnn::LayerType> paddingRequiredLayers {
+    LayerType::ArgMinMax,
+    LayerType::Concat,
+    LayerType::Convolution2d,
+    LayerType::DepthToSpace,
+    LayerType::DepthwiseConvolution2d,
+    LayerType::Dequantize,
+    LayerType::FullyConnected,
+    LayerType::Gather,
+    LayerType::L2Normalization,
+    LayerType::Lstm,
+    LayerType::Mean,
+    LayerType::Multiplication,
+    LayerType::Normalization,
+    LayerType::Permute,
+    LayerType::Pooling2d,
+    LayerType::Quantize,
+    LayerType::QuantizedLstm,
+    LayerType::Resize,
+    LayerType::Stack,
+    LayerType::Transpose,
+    LayerType::TransposeConvolution2d
+};
+
class NeonTensorHandleFactory : public ITensorHandleFactory
{
public:
@@ -46,6 +70,10 @@ public:
    MemorySourceFlags GetImportFlags() const override;
+    std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
+                                            const IConnectableLayer* connectedLayer,
+                                            CapabilityClass capabilityClass) override;
+
private:
    mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
};
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 0c3944f3d5..16c066bcbd 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -11,6 +11,7 @@ list(APPEND armnnNeonBackendUnitTests_sources
    NeonLayerTests.cpp
    NeonOptimizedNetworkTests.cpp
    NeonRuntimeTests.cpp
+    NeonTensorHandleTests.cpp
    NeonTimerTest.cpp
    NeonWorkloadFactoryHelper.hpp
)
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
new file mode 100644
index 0000000000..fe5e8f9fb3
--- /dev/null
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <neon/NeonTensorHandle.hpp>
+#include <neon/NeonTensorHandleFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(NeonTensorHandleTests)
+using namespace armnn;
+
+BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesNoPadding)
+{
+    std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
+    NeonTensorHandleFactory handleFactory(memoryManager);
+
+    INetworkPtr network(INetwork::Create());
+
+    // Add the layers
+    IConnectableLayer* input = network->AddInputLayer(0);
+    SoftmaxDescriptor descriptor;
+    descriptor.m_Beta = 1.0f;
+    IConnectableLayer* softmax = network->AddSoftmaxLayer(descriptor);
+    IConnectableLayer* output = network->AddOutputLayer(2);
+
+    // Establish connections
+    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // No padding required for input
+    std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
+                                                                         softmax,
+                                                                         CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.empty());
+
+    // No padding required for Softmax
+    capabilities = handleFactory.GetCapabilities(softmax, output, CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.empty());
+
+    // No padding required for output
+    capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.empty());
+}
+
+BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesPadding)
+{
+    std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
+    NeonTensorHandleFactory handleFactory(memoryManager);
+
+    INetworkPtr network(INetwork::Create());
+
+    // Add the layers
+    IConnectableLayer* input = network->AddInputLayer(0);
+    Pooling2dDescriptor descriptor;
+    IConnectableLayer* pooling = network->AddPooling2dLayer(descriptor);
+    IConnectableLayer* output = network->AddOutputLayer(2);
+
+    // Establish connections
+    input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
+    pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // No padding required for input
+    std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
+                                                                         pooling,
+                                                                         CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.empty());
+
+    // No padding required for output
+    capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.empty());
+
+    // Padding required for Pooling2d
+    capabilities = handleFactory.GetCapabilities(pooling, output, CapabilityClass::PaddingRequired);
+    BOOST_TEST(capabilities.size() == 1);
+    BOOST_TEST((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
+    BOOST_TEST(capabilities[0].m_Value);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
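
The new NeonTensorHandleTests.cpp is appended to armnnNeonBackendUnitTests_sources, so it builds with the existing Neon backend unit tests; assuming the standard ArmNN UnitTests binary and a build configured with NEON support, the two cases above can be selected at run time with Boost.Test's --run_test=NeonTensorHandleTests filter.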