From 906f94631aa7ef590b9d8ff45507e818a0d1ac2c Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 10 May 2019 13:55:21 +0100
Subject: IVGCVSW-3076 Add ConcatLayer methods to public API

!android-nn-driver:1120

Change-Id: I5192fa3deb4ea9766d38ad0bf4dfbfa0b4924c41
Signed-off-by: Jim Flynn
---
 include/armnn/ILayerSupport.hpp                      |  7 +++++++
 include/armnn/ILayerVisitor.hpp                      | 15 +++++++++++++++
 include/armnn/INetwork.hpp                           | 11 +++++++++++
 include/armnn/LayerSupport.hpp                       | 10 ++++++++++
 include/armnn/LayerVisitorBase.hpp                   |  4 ++++
 src/armnn/LayerSupport.cpp                           | 16 ++++++++++++++++
 src/armnn/Network.cpp                                | 10 +++++++++-
 src/armnn/Network.hpp                                |  6 +++++-
 src/armnn/QuantizerVisitor.cpp                       |  2 +-
 src/armnn/test/NetworkTests.cpp                      |  2 ++
 src/armnn/test/QuantizerTest.cpp                     |  2 ++
 src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp |  4 ++++
 src/armnnCaffeParser/CaffeParser.cpp                 |  4 ++--
 src/armnnDeserializer/Deserializer.cpp               |  6 +++---
 src/armnnDeserializer/Deserializer.hpp               |  2 +-
 src/armnnDeserializer/DeserializerSupport.md         |  2 +-
 src/armnnSerializer/test/SerializerTests.cpp         |  2 ++
 src/armnnTfLiteParser/TfLiteParser.cpp               |  2 +-
 src/armnnTfParser/TfParser.cpp                       |  2 +-
 src/backends/backendsCommon/LayerSupportBase.cpp     | 10 ++++++++++
 src/backends/backendsCommon/LayerSupportBase.hpp     |  6 ++++++
 src/backends/backendsCommon/WorkloadFactory.cpp      |  2 ++
 src/backends/backendsCommon/test/MergerTestImpl.hpp  |  2 ++
 src/backends/cl/ClLayerSupport.cpp                   | 10 ++++++++++
 src/backends/cl/ClLayerSupport.hpp                   |  6 ++++++
 src/backends/neon/NeonLayerSupport.cpp               | 10 ++++++++++
 src/backends/neon/NeonLayerSupport.hpp               |  6 ++++++
 src/backends/reference/RefLayerSupport.cpp           | 10 ++++++++++
 src/backends/reference/RefLayerSupport.hpp           |  6 ++++++
 29 files changed, 165 insertions(+), 12 deletions(-)

diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index b8e48c8704..c3fb7b016e 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include <armnn/Deprecated.hpp>
 #include 
 #include 
 
@@ -47,6 +48,11 @@ public:
                                         const BatchToSpaceNdDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                   const TensorInfo& output,
+                                   const OriginsDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsConstantSupported(const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
@@ -184,6 +190,7 @@ public:
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                    const TensorInfo& output,
                                    const OriginsDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index ab793bc587..10d0cc6b63 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -58,6 +58,20 @@ public:
                                         const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                         const char* name = nullptr) = 0;
 
+    /// Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param mergerDescriptor - WindowsDescriptor to configure the concatenation process. Number of Views must be
+    ///                           equal to the number of inputs, and their order must match - e.g. first view
+    ///                           corresponds to the first input, second view to the second input, etc....
+    /// @param name - Optional name for the layer.
+    virtual void VisitConcatLayer(const IConnectableLayer* layer,
+                                  const OriginsDescriptor& mergerDescriptor,
+                                  const char* name = nullptr)
+    {
+        // default implementation to ease transition while MergerLayer is being deprecated
+        VisitMergerLayer(layer, mergerDescriptor, name);
+    }
+
     /// Function a layer with no inputs and a single output, which always corresponds to
     /// the passed in constant tensor should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
@@ -211,6 +225,7 @@ public:
     ///                           the number of inputs, and their order must match - e.g. first view corresponds to
     ///                           the first input, second view to the second input, etc....
     /// @param name - Optional name for the layer.
+    // NOTE: this method will be deprecated and replaced by VisitConcatLayer
     virtual void VisitMergerLayer(const IConnectableLayer* layer,
                                   const OriginsDescriptor& mergerDescriptor,
                                   const char* name = nullptr) = 0;
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 7141770298..bae6e94955 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -4,6 +4,7 @@
 //
 #pragma once
 
+#include <armnn/Deprecated.hpp>
 #include 
 #include 
 #include 
@@ -100,6 +101,15 @@ public:
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
 
+    /// Adds a concatenation layer to the network.
+    /// @param mergerDescriptor - WindowsDescriptor to configure the concatenation process. Number of Views must
+    ///                           be equal to the number of inputs, and their order must match - e.g. first view
+    ///                           corresponds to the first input, second view to the second input, etc....
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+                                              const char* name = nullptr) = 0;
+
     /// Adds a 2D convolution layer to the network.
     /// @param convolution2dDescriptor - Description of the 2D convolution layer.
     /// @param weights - Tensor for the weights data.
@@ -248,6 +258,7 @@ public:
     ///                           the first input, second view to the second input, etc....
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
+ ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead") virtual IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor, const char* name = nullptr) = 0; diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp index c9fc264e0c..e105b67740 100644 --- a/include/armnn/LayerSupport.hpp +++ b/include/armnn/LayerSupport.hpp @@ -4,6 +4,7 @@ // #pragma once +#include #include #include #include @@ -48,6 +49,14 @@ bool IsBatchToSpaceNdSupported(const BackendId& backend, char* reasonIfUnsupported = nullptr, size_t reasonIfUnsupportedMaxLength = 1024); +/// Deprecated in favor of IBackend and ILayerSupport interfaces +bool IsConcatSupported(const BackendId& backend, + const std::vector inputs, + const TensorInfo& output, + const OriginsDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + /// Deprecated in favor of IBackend and ILayerSupport interfaces bool IsConstantSupported(const BackendId& backend, const TensorInfo& output, @@ -212,6 +221,7 @@ bool IsMergeSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength = 1024); /// Deprecated in favor of IBackend and ILayerSupport interfaces +ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") bool IsMergerSupported(const BackendId& backend, const std::vector inputs, const TensorInfo& output, diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp index 12eb225674..62673ace07 100644 --- a/include/armnn/LayerVisitorBase.hpp +++ b/include/armnn/LayerVisitorBase.hpp @@ -33,6 +33,10 @@ public: LayerBindingId, const char*) override { DefaultPolicy::Apply(); } + void VisitConcatLayer(const IConnectableLayer*, + const OriginsDescriptor&, + const char*) override { DefaultPolicy::Apply(); } + void VisitConvolution2dLayer(const IConnectableLayer*, const Convolution2dDescriptor&, const ConstTensor&, diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index 831a846092..47a0d3ec6b 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -130,6 +130,18 @@ bool IsBatchToSpaceNdSupported(const BackendId& backend, descriptor); } +bool IsConcatSupported(const BackendId& backend, + std::vector inputs, + const TensorInfo& output, + const OriginsDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsMergerSupported(backend, inputs, output, descriptor, reasonIfUnsupported, reasonIfUnsupportedMaxLength); + ARMNN_NO_DEPRECATE_WARN_END +} + bool IsConstantSupported(const BackendId& backend, const TensorInfo& output, char* reasonIfUnsupported, @@ -386,6 +398,7 @@ bool IsMergeSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output); } +ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead") bool IsMergerSupported(const BackendId& backend, std::vector inputs, const TensorInfo& output, @@ -394,7 +407,10 @@ bool IsMergerSupported(const BackendId& backend, size_t reasonIfUnsupportedMaxLength) { BOOST_ASSERT(inputs.size() > 0); + + ARMNN_NO_DEPRECATE_WARN_BEGIN FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor); + ARMNN_NO_DEPRECATE_WARN_END } bool IsMinimumSupported(const BackendId& backend, diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index c3f29d4f3b..087ec0f8e9 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -602,6 +602,14 @@ IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescripto 
     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
 }
 
+IConnectableLayer* Network::AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+                                           const char* name)
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return AddMergerLayer(mergerDescriptor, name);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
@@ -762,7 +770,7 @@ IConnectableLayer* Network::AddMinimumLayer(const char* name)
 }
 
 IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
-                                          const char* name)
+                                           const char* name)
 {
     return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name);
 }
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 660ca87d13..a569a7c847 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -37,6 +37,9 @@ public:
     IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                               const char* name = nullptr) override;
 
+    IConnectableLayer* AddConcatLayer(const OriginsDescriptor& mergerDescriptor,
+                                      const char* name = nullptr) override;
+
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const Optional<ConstTensor>& biases,
@@ -115,8 +118,9 @@ public:
     IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                         const char* name = nullptr) override;
 
+    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
     IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
-                                      const char* name = nullptr) override;
+                                       const char* name = nullptr) override;
 
     IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
 
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index c1e7c2c205..38e33cf2a3 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -267,7 +267,7 @@ void QuantizerVisitor::VisitMergerLayer(const IConnectableLayer* layer,
                                         const OriginsDescriptor& mergerDescriptor,
                                         const char* name)
 {
-    IConnectableLayer* newLayer = m_QuantizedNetwork->AddMergerLayer(mergerDescriptor, name);
+    IConnectableLayer* newLayer = m_QuantizedNetwork->AddConcatLayer(mergerDescriptor, name);
     RecordLayer(layer, newLayer);
     SetQuantizedInputConnections(layer, newLayer);
 }
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index dd8eb7773f..155304be36 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -254,7 +254,9 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger)
     // Adds a merger layer.
     armnn::OriginsDescriptor mergerDesc(2, 4);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer");
+    ARMNN_NO_DEPRECATE_WARN_END
     BOOST_TEST(mergerLayer);
 
     softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2103de062c..4f22317651 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -1283,7 +1283,9 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
     IConnectableLayer* input2 = network->AddInputLayer(2);
 
     OriginsDescriptor descriptor(3, 1);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* merger = network->AddMergerLayer(descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
 
     IConnectableLayer* output0 = network->AddOutputLayer(3);
 
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index dbadb75a09..f94906d10d 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -239,7 +239,9 @@ BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameAndDescriptor)
     TestMergerLayerVisitor visitor(descriptor, layerName);
     Network net;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer *const layer = net.AddMergerLayer(descriptor, layerName);
+    ARMNN_NO_DEPRECATE_WARN_END
     layer->Accept(visitor);
 }
 
@@ -251,7 +253,9 @@ BOOST_AUTO_TEST_CASE(CheckMergerLayerVisitorNameNullAndDescriptor)
     TestMergerLayerVisitor visitor(descriptor);
     Network net;
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer *const layer = net.AddMergerLayer(descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
     layer->Accept(visitor);
 }
 
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 3cc0fb9319..90579e67fd 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -589,7 +589,7 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
     outputShape.set_dim(1, mergeDimSizes[1]);
 
     // Finally add the merge layer
-    IConnectableLayer* mergerLayer = m_Network->AddMergerLayer(mergeDesc, layerParam.name().c_str());
+    IConnectableLayer* mergerLayer = m_Network->AddConcatLayer(mergeDesc, layerParam.name().c_str());
 
     if (!mergerLayer)
     {
@@ -1325,7 +1325,7 @@ void CaffeParserBase::ParseConcatLayer(const LayerParameter& layerParam)
     }
     mergeDimSizes[concatDim] = mergeDim;
 
-    armnn::IConnectableLayer* concatlayer = m_Network->AddMergerLayer(concatDescriptor, layerParam.name().c_str());
+    armnn::IConnectableLayer* concatlayer = m_Network->AddConcatLayer(concatDescriptor, layerParam.name().c_str());
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 8b790f7b04..b7d45e0a7d 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -209,7 +209,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_MeanLayer] = &Deserializer::ParseMean;
     m_ParserFunctions[Layer_MinimumLayer] = &Deserializer::ParseMinimum;
     m_ParserFunctions[Layer_MergeLayer] = &Deserializer::ParseMerge;
-    m_ParserFunctions[Layer_MergerLayer] = &Deserializer::ParseMerger;
+    m_ParserFunctions[Layer_MergerLayer] = &Deserializer::ParseConcat;
     m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
     m_ParserFunctions[Layer_NormalizationLayer] = &Deserializer::ParseNormalization;
     m_ParserFunctions[Layer_PadLayer] = &Deserializer::ParsePad;
@@ -1213,7 +1213,7 @@ void Deserializer::ParseMaximum(GraphPtr graph, unsigned int layerIndex)
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
-void Deserializer::ParseMerger(GraphPtr graph, unsigned int layerIndex)
+void Deserializer::ParseConcat(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
     CHECK_LOCATION();
@@ -1244,7 +1244,7 @@ void Deserializer::ParseMerger(GraphPtr graph, unsigned int layerIndex)
     }
     descriptor.SetConcatAxis(mergerDescriptor->concatAxis());
 
-    IConnectableLayer* layer = m_Network->AddMergerLayer(descriptor, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddConcatLayer(descriptor, layerName.c_str());
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index dfa5b06057..c647ac3639 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -82,6 +82,7 @@ private:
     void ParseAdd(GraphPtr graph, unsigned int layerIndex);
     void ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex);
     void ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex);
+    void ParseConcat(GraphPtr graph, unsigned int layerIndex);
     void ParseConstant(GraphPtr graph, unsigned int layerIndex);
     void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex);
     void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex);
@@ -98,7 +99,6 @@ private:
     void ParseMean(GraphPtr graph, unsigned int layerIndex);
     void ParseMinimum(GraphPtr graph, unsigned int layerIndex);
     void ParseMerge(GraphPtr graph, unsigned int layerIndex);
-    void ParseMerger(GraphPtr graph, unsigned int layerIndex);
     void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
     void ParseNormalization(GraphPtr graph, unsigned int layerIndex);
     void ParseLstm(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 9cdeea2800..5c61971ebc 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -10,6 +10,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Addition
 * BatchToSpaceNd
 * BatchNormalization
+* Concat
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
@@ -26,7 +27,6 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Maximum
 * Mean
 * Merge
-* Merger
 * Minimum
 * Multiplication
 * Normalization
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index b0226689f4..4b3a09e47c 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1276,7 +1276,9 @@ BOOST_AUTO_TEST_CASE(SerializeMerger)
     armnn::INetworkPtr network = armnn::INetwork::Create();
     armnn::IConnectableLayer* const inputLayerOne = network->AddInputLayer(0);
     armnn::IConnectableLayer* const inputLayerTwo = network->AddInputLayer(1);
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     armnn::IConnectableLayer* const mergerLayer = network->AddMergerLayer(descriptor, layerName.c_str());
+    ARMNN_NO_DEPRECATE_WARN_END
     armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
 
     inputLayerOne->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index fdb38122c2..036a881d1c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1692,7 +1692,7 @@ void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex
     }
 
     auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
-    IConnectableLayer* layer = m_Network->AddMergerLayer(concatDescriptor, layerName.c_str());
+    IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
 
     BOOST_ASSERT(layer != nullptr);
 
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index d7cfba8dfe..e5948d55f4 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -2120,7 +2120,7 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
     // Update the output shape
     mergeDims[concatDim] = mergeDim;
 
-    armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());
+    armnn::IConnectableLayer *layer = m_Network->AddConcatLayer(concatDescriptor, nodeDef.name().c_str());
 
     layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(mergeDims, DataType::Float32));
 
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index b37fa331ad..7760c079ac 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -68,6 +68,16 @@ bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                         const TensorInfo& output,
+                                         const OriginsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 0c32a640b3..88d5792819 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -37,6 +37,11 @@ public:
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -173,6 +178,7 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index d9774b063d..9ed0e29673 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -554,7 +554,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
 
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
             result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+            ARMNN_NO_DEPRECATE_WARN_END
             break;
         }
         case LayerType::Multiplication:
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
index 2511bd44ba..35ab2bc861 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -33,7 +33,9 @@ INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
     descriptor = CreateMergerDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatAxis);
 
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
+    ARMNN_NO_DEPRECATE_WARN_END
 
     for (unsigned int i = 0; i < inputShapes.size(); ++i)
     {
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 73c9e49c4f..a557870cea 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -185,6 +185,16 @@ bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                descriptor);
 }
 
+bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                       const TensorInfo& output,
+                                       const OriginsDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index e9a9e68005..b634d46768 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -36,6 +36,11 @@ public:
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -138,6 +143,7 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index c257dd373a..9bd48cf860 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -142,6 +142,16 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                    descriptor);
 }
 
+bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                         const TensorInfo& output,
+                                         const OriginsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index a5aae0bde9..8312bb977a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -31,6 +31,11 @@ public:
                                         const BatchNormalizationDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -101,6 +106,7 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index a1d8e7de81..f79c152139 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -293,6 +293,16 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                      &TrueFunc<>));
 }
 
+bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const OriginsDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
+    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9b1a95cdcd..a4ae01e403 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -36,6 +36,11 @@ public:
                                    const BatchToSpaceNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+                           const TensorInfo& output,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
@@ -162,6 +167,7 @@ public:
                           const MeanDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
     bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                            const TensorInfo& output,
                            const OriginsDescriptor& descriptor,
-- 
cgit v1.2.1
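
Editor's note (not part of the patch): the sketch below shows how client code might adopt the new AddConcatLayer entry point once this change lands. It is a minimal, hypothetical example; the layer name "concat", the tensor shapes, and the binding IDs are illustrative assumptions. CreateMergerDescriptorForConcatenation is the same helper the patch itself uses in MergerTestImpl.hpp to build an OriginsDescriptor with one view per input.

// Hypothetical usage sketch only; not part of the committed change.
#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Two identical 4-D inputs, concatenated along axis 3 (assumed shapes for illustration).
    const TensorInfo inputInfo({ 1, 2, 2, 2 }, DataType::Float32);
    std::vector<TensorShape> shapes = { inputInfo.GetShape(), inputInfo.GetShape() };

    // Build an OriginsDescriptor describing one view per input.
    OriginsDescriptor concatDescriptor =
        CreateMergerDescriptorForConcatenation(shapes.begin(), shapes.end(), 3);

    IConnectableLayer* input0 = network->AddInputLayer(0);
    IConnectableLayer* input1 = network->AddInputLayer(1);
    IConnectableLayer* concat = network->AddConcatLayer(concatDescriptor, "concat"); // previously AddMergerLayer
    IConnectableLayer* output = network->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(concat->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(concat->GetInputSlot(1));
    concat->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
    input1->GetOutputSlot(0).SetTensorInfo(inputInfo);
    concat->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 2, 2, 4 }, DataType::Float32));

    return 0;
}

During the transition, visitors that only implement VisitMergerLayer keep working, because the new ILayerVisitor::VisitConcatLayer default implementation forwards to VisitMergerLayer (see the ILayerVisitor.hpp hunk above).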