diff options
Diffstat (limited to 'src/armnn/layers')
-rw-r--r-- | src/armnn/layers/DequantizeLayer.cpp | 52 | ||||
-rw-r--r-- | src/armnn/layers/DequantizeLayer.hpp | 42 |
2 files changed, 94 insertions, 0 deletions
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp new file mode 100644 index 0000000000..4dd30de77b --- /dev/null +++ b/src/armnn/layers/DequantizeLayer.cpp @@ -0,0 +1,52 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include "DequantizeLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +namespace armnn +{ + +DequantizeLayer::DequantizeLayer(const char* name) + : Layer(1, 1, LayerType::Dequantize, name) +{} + +std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + DequantizeQueueDescriptor descriptor; + + return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const +{ + return CloneBase<DequantizeLayer>(graph, GetName()); +} + +void DequantizeLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + std::vector<TensorShape> inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void DequantizeLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitDequantizeLayer(this, GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp new file mode 100644 index 0000000000..1340f96a27 --- /dev/null +++ b/src/armnn/layers/DequantizeLayer.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Layer.hpp"

namespace armnn
{

/// This layer dequantizes the input tensor.
class DequantizeLayer : public Layer
{
public:
    /// Makes a workload for the Dequantize type.
    /// @param [in] graph The graph where this layer can be found.
    /// @param [in] factory The workload factory which will create the workload.
    /// @return A pointer to the created workload, or nullptr if not created.
    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
                                                      const IWorkloadFactory& factory) const override;

    /// Creates a dynamically-allocated copy of this layer.
    /// @param [in] graph The graph into which this layer is being cloned.
    DequantizeLayer* Clone(Graph& graph) const override;

    /// Check if the input tensor shape(s)
    /// will lead to a valid configuration of @ref DequantizeLayer.
    void ValidateTensorShapesFromInputs() override;

    /// Applies the given visitor to this layer (visitor pattern).
    /// @param [in] visitor The visitor to apply.
    void Accept(ILayerVisitor& visitor) const override;

protected:
    /// Constructor to create a DequantizeLayer.
    /// @param [in] name Optional name for the layer.
    DequantizeLayer(const char* name);

    /// Default destructor
    ~DequantizeLayer() = default;
};

} // namespace armnn