diff options
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/InternalTypes.hpp                                    |  5
-rw-r--r--  src/armnn/LayersFwd.hpp                                        |  2
-rw-r--r--  src/armnn/layers/UnmapLayer.cpp                                | 49
-rw-r--r--  src/armnn/layers/UnmapLayer.hpp                                | 42
-rw-r--r--  src/backends/backendsCommon/CMakeLists.txt                     |  2
-rw-r--r--  src/backends/backendsCommon/UnmapWorkload.cpp                  | 22
-rw-r--r--  src/backends/backendsCommon/UnmapWorkload.hpp                  | 19
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                   | 20
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                   |  5
-rw-r--r--  src/backends/backendsCommon/common.mk                          |  1
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  | 27
11 files changed, 190 insertions, 4 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index e95a63af45..778408ae60 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -74,7 +74,8 @@ X(Subtraction) \ X(Switch) \ X(Transpose) \ - X(TransposeConvolution2d) + X(TransposeConvolution2d) \ + X(Unmap) /// When adding a new layer, adapt also the LastLayer enum value in the /// enum class LayerType below @@ -87,7 +88,7 @@ enum class LayerType LIST_OF_LAYER_TYPE #undef X FirstLayer = Activation, - LastLayer = TransposeConvolution2d + LastLayer = Unmap }; const char* GetLayerTypeAsCString(LayerType type); diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index f22110d3ac..ccc5ef2b4c 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -69,6 +69,7 @@ #include "layers/SwitchLayer.hpp" #include "layers/TransposeConvolution2dLayer.hpp" #include "layers/TransposeLayer.hpp" +#include "layers/UnmapLayer.hpp" namespace armnn { @@ -160,5 +161,6 @@ DECLARE_LAYER(Subtraction) DECLARE_LAYER(Switch) DECLARE_LAYER(Transpose) DECLARE_LAYER(TransposeConvolution2d) +DECLARE_LAYER(Unmap) } diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp new file mode 100644 index 0000000000..d2df9c1bc6 --- /dev/null +++ b/src/armnn/layers/UnmapLayer.cpp @@ -0,0 +1,49 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#include "UnmapLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> +#include <backendsCommon/UnmapWorkload.hpp> + +namespace armnn +{ + +UnmapLayer::UnmapLayer(const char* name) + : Layer(1, 0, LayerType::Unmap, name) +{ +} + +UnmapLayer* UnmapLayer::Clone(Graph& graph) const +{ + return CloneBase<UnmapLayer>(graph, GetName()); +} + +std::unique_ptr<IWorkload> UnmapLayer::CreateWorkload(const IWorkloadFactory& factory) const +{ + IgnoreUnused(factory); + UnmapQueueDescriptor descriptor; + + //This is different from other workloads. Does not get created by the workload factory. + return std::make_unique<UnmapWorkload>(descriptor, PrepInfoAndDesc(descriptor)); +} + +void UnmapLayer::ValidateTensorShapesFromInputs() +{ + // validates that the input is connected. + VerifyLayerConnections(1, CHECK_LOCATION()); + ARMNN_ASSERT(GetNumOutputSlots() == 0); +} + +void UnmapLayer::Accept(ILayerVisitor& visitor) const +{ + IgnoreUnused(visitor); + throw armnn::Exception("UnmapLayer should not appear in an input graph"); +} + +} // namespace armnn diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp new file mode 100644 index 0000000000..12d4342d62 --- /dev/null +++ b/src/armnn/layers/UnmapLayer.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include <Layer.hpp> + +namespace armnn +{ + +/// This layer represents a memory copy operation. +class UnmapLayer : public Layer +{ +public: + /// Makes a workload for the Unmap type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. 
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + UnmapLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref UnmapLayer. + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs() override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a UnmapLayer. + /// @param [in] name Optional name for the layer. + UnmapLayer(const char* name); + + /// Default destructor + ~UnmapLayer() = default; +}; + +} // namespace diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt index 28b3088e48..cf6da807ca 100644 --- a/src/backends/backendsCommon/CMakeLists.txt +++ b/src/backends/backendsCommon/CMakeLists.txt @@ -34,6 +34,8 @@ list(APPEND armnnBackendsCommon_sources OptimizationViews.hpp TensorHandleFactoryRegistry.cpp TensorHandleFactoryRegistry.hpp + UnmapWorkload.cpp + UnmapWorkload.hpp WorkloadDataCollector.hpp Workload.hpp WorkloadData.cpp diff --git a/src/backends/backendsCommon/UnmapWorkload.cpp b/src/backends/backendsCommon/UnmapWorkload.cpp new file mode 100644 index 0000000000..b22158cf9f --- /dev/null +++ b/src/backends/backendsCommon/UnmapWorkload.cpp @@ -0,0 +1,22 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include <backendsCommon/UnmapWorkload.hpp> + +namespace armnn +{ + +UnmapWorkload::UnmapWorkload(const UnmapQueueDescriptor& descriptor, + const WorkloadInfo& info) + : BaseWorkload<UnmapQueueDescriptor>(descriptor, info) +{ +} + +void UnmapWorkload::Execute() const +{ + m_Data.m_Inputs[0]->Unmap(); +} + +} //namespace armnn diff --git a/src/backends/backendsCommon/UnmapWorkload.hpp b/src/backends/backendsCommon/UnmapWorkload.hpp new file mode 100644 index 0000000000..7f13f0d295 --- /dev/null +++ b/src/backends/backendsCommon/UnmapWorkload.hpp @@ -0,0 +1,19 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "Workload.hpp" + +namespace armnn +{ + +class UnmapWorkload : public BaseWorkload<UnmapQueueDescriptor> +{ +public: + UnmapWorkload(const UnmapQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; +}; + +} //namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 134495991e..6d88664728 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -469,7 +469,25 @@ void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const const std::string descriptorName{"MapQueueDescriptor"}; ValidateNumInputs(workloadInfo, descriptorName, 1); - ValidateNumOutputs(workloadInfo, descriptorName , 0); + ValidateNumOutputs(workloadInfo, descriptorName, 0); + + for (unsigned int i = 0; i < m_Inputs.size(); ++i) + { + if (!m_Inputs[i]) + { + throw InvalidArgumentException( + fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i))); + } + } +} + +//--------------------------------------------------------------- +void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string descriptorName{"UnmapQueueDescriptor"}; + + 
ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 0); for (unsigned int i = 0; i < m_Inputs.size(); ++i) { diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index be0a67e753..c563626b28 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -60,6 +60,11 @@ struct MapQueueDescriptor : QueueDescriptor void Validate(const WorkloadInfo& workloadInfo) const; }; +struct UnmapQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + struct MemCopyQueueDescriptor : QueueDescriptor { void Validate(const WorkloadInfo& workloadInfo) const; diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index ceec2ac6e1..dd47d0a31f 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -20,6 +20,7 @@ COMMON_SOURCES := \ MemSyncWorkload.cpp \ OptimizationViews.cpp \ TensorHandleFactoryRegistry.cpp \ + UnmapWorkload.cpp \ WorkloadData.cpp \ WorkloadFactory.cpp \ WorkloadUtils.cpp diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 1078c2a8d7..a8465b45f6 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -7,6 +7,7 @@ #include <Graph.hpp> #include <backendsCommon/MapWorkload.hpp> +#include <backendsCommon/UnmapWorkload.hpp> #include <backendsCommon/WorkloadFactory.hpp> #include <armnn/utility/IgnoreUnused.hpp> @@ -214,6 +215,22 @@ struct DummyLayer<armnn::SplitterLayer> armnn::SplitterLayer* m_Layer; }; +template<> +struct DummyLayer<armnn::UnmapLayer, void> +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>(""); + } + + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + + armnn::UnmapLayer* 
m_Layer; +}; + template <typename ConvolutionLayerType> struct DummyConvolutionLayer { @@ -628,7 +645,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Pooling2d) DECLARE_LAYER_POLICY_2_PARAM(PreCompiled) DECLARE_LAYER_POLICY_1_PARAM(Prelu) - DECLARE_LAYER_POLICY_2_PARAM(QLstm) DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm) @@ -665,6 +681,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Transpose) DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d) +DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void) + // Generic implementation to get the number of input slots for a given layer type; template<armnn::LayerType Type> @@ -798,6 +816,13 @@ bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>) return true; } +template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type> +bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>) +{ + IgnoreUnused(factory); + return true; +} + // Helper function to compute the next type in the LayerType enum. constexpr armnn::LayerType NextType(armnn::LayerType type) { |