Diffstat (limited to 'src/armnn')
 src/armnn/LayerSupport.cpp                | 3
 src/armnn/Network.cpp                     | 5
 src/armnn/Network.hpp                     | 3
 src/armnn/layers/L2NormalizationLayer.cpp | 6
 src/armnn/layers/L2NormalizationLayer.hpp | 6
 src/armnn/test/CreateWorkload.hpp         | 8
6 files changed, 19 insertions, 12 deletions
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 6e7e3e14d6..74243df7ba 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -196,10 +196,11 @@ bool IsFullyConnectedSupported(Compute compute,
 bool IsL2NormalizationSupported(Compute compute,
                                 const TensorInfo& input,
                                 const TensorInfo& output,
+                                const L2NormalizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output);
+    FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output, descriptor);
 }
 
 bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4f5e2974ed..49e60e1ad4 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -468,9 +468,10 @@ resizeDescriptor, const char* name)
     return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
 }
 
-IConnectableLayer* Network::AddL2NormalizationLayer(const char* name)
+IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+                                                    const char* name)
 {
-    return m_Graph->AddLayer<L2NormalizationLayer>(name);
+    return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
 }
 
 IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ea4284e38f..129513d657 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -98,7 +98,8 @@ public:
     IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                               const char* name = nullptr) override;
 
-    IConnectableLayer* AddL2NormalizationLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+                                               const char* name = nullptr) override;
 
     IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index c114a80c30..683c7db781 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -13,8 +13,8 @@
 namespace armnn
 {
 
-L2NormalizationLayer::L2NormalizationLayer(const char* name)
-    : Layer(1, 1, LayerType::L2Normalization, name)
+L2NormalizationLayer::L2NormalizationLayer(const L2NormalizationDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::L2Normalization, param, name)
 {
 }
 
@@ -27,7 +27,7 @@ std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const Graph& gra
 
 L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
 {
-    return CloneBase<L2NormalizationLayer>(graph, GetName());
+    return CloneBase<L2NormalizationLayer>(graph, m_Param, GetName());
 }
 
 void L2NormalizationLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp
index fdf46eb921..bf4d49e7c8 100644
--- a/src/armnn/layers/L2NormalizationLayer.hpp
+++ b/src/armnn/layers/L2NormalizationLayer.hpp
@@ -4,12 +4,12 @@
 //
 #pragma once
 
-#include <Layer.hpp>
+#include "LayerWithParameters.hpp"
 
 namespace armnn
 {
 
-class L2NormalizationLayer : public Layer
+class L2NormalizationLayer : public LayerWithParameters<L2NormalizationDescriptor>
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
@@ -20,7 +20,7 @@ public:
     void ValidateTensorShapesFromInputs() override;
 
 protected:
-    L2NormalizationLayer(const char* name);
+    L2NormalizationLayer(const L2NormalizationDescriptor& param, const char* name);
     ~L2NormalizationLayer() = default;
 };
 
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 52f0673772..61f9f1cd83 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -836,10 +836,13 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
 
 template <typename L2NormalizationWorkload, armnn::DataType DataType>
 std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
-    armnn::Graph& graph)
+    armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
 {
     // Creates the layer we're testing.
-    Layer* const layer = graph.AddLayer<L2NormalizationLayer>("l2norm");
+    L2NormalizationDescriptor layerDesc;
+    layerDesc.m_DataLayout = dataLayout;
+
+    Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");
 
     // Creates extra layers.
     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
@@ -856,6 +859,7 @@ std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn
     auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);
 
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
     BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
     BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
 
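
Taken together, these changes switch L2 normalization from a name-only layer to a descriptor-based one, so the data layout is configured per layer and flows through network construction, layer cloning, workload creation, and the backend support query. The following is a minimal sketch of how a caller might use the new AddL2NormalizationLayer overload when building a network through the public INetwork interface; the ArmNN.hpp include, the INetwork::Create/AddInputLayer/AddOutputLayer calls, and the tensor shape are ordinary ArmNN usage assumed for illustration and are not part of this diff.

// Sketch only: exercises the descriptor-taking AddL2NormalizationLayer overload
// introduced above. Everything else is standard ArmNN network building assumed
// for illustration; shapes and layer names are arbitrary.
#include <armnn/ArmNN.hpp>

armnn::INetworkPtr BuildL2NormNetwork()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    // New in this change: the layer is configured through a descriptor,
    // which carries the data layout (NCHW here, matching the test default).
    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NCHW;

    armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* l2norm = network->AddL2NormalizationLayer(desc, "l2norm");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(l2norm->GetInputSlot(0));
    l2norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Illustrative tensor info: batch 1, 3 channels, 8x8 spatial, float32.
    armnn::TensorInfo info({ 1, 3, 8, 8 }, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    l2norm->GetOutputSlot(0).SetTensorInfo(info);

    return network;
}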