ArmNN 20.08
QuantizeLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/ILayerVisitor.hpp>

namespace armnn
{

QuantizeLayer::QuantizeLayer(const char* name)
: Layer(1, 1, LayerType::Quantize, name)
{}

std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    QuantizeQueueDescriptor descriptor;
    WorkloadInfo info = PrepInfoAndDesc(descriptor);
    return factory.CreateQuantize(descriptor, info);
}

Layer* QuantizeLayer::Clone(Graph& graph) const
{
    QuantizeLayer* clone = CloneBase<QuantizeLayer>(graph, GetName());
    return clone;
}

void QuantizeLayer::ValidateTensorShapesFromInputs()
{
    // A Quantize layer has exactly one input connection.
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    // Check the output shape against the selected shape inference method.
    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    // Quantization only changes the data type, so the output shape matches the input shape.
    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}

void QuantizeLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitQuantizeLayer(this, GetName());
}

} //namespace armnn
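
For context, the sketch below shows how a Quantize layer is typically created through the public INetwork interface, which is what instantiates this QuantizeLayer internally via AddQuantizeLayer. It is a minimal example, not taken from this file: the 1x4 tensor shape and the QAsymmU8 scale and offset values (0.1f, 128) are illustrative assumptions.

// Minimal sketch: build a network that quantizes a Float32 tensor to QAsymmU8.
// The shape, scale and offset below are illustrative values, not from this file.
#include <armnn/ArmNN.hpp>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input    = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* quantize = network->AddQuantizeLayer("quantize");
    armnn::IConnectableLayer* output   = network->AddOutputLayer(0, "output");

    // Connect input -> quantize -> output.
    input->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
    quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Float32 going in, 8-bit asymmetric quantized data coming out.
    armnn::TensorInfo floatInfo(armnn::TensorShape({1, 4}), armnn::DataType::Float32);
    armnn::TensorInfo quantInfo(armnn::TensorShape({1, 4}), armnn::DataType::QAsymmU8, 0.1f, 128);
    input->GetOutputSlot(0).SetTensorInfo(floatInfo);
    quantize->GetOutputSlot(0).SetTensorInfo(quantInfo);

    return 0;
}

When the network is later optimized and loaded, the layer added by AddQuantizeLayer is the QuantizeLayer defined above, and the selected backend's workload factory receives the CreateWorkload() call shown in this file.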
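Accept() above calls back into ILayerVisitor::VisitQuantizeLayer. The sketch below shows one way to hook into that callback, assuming the armnn/LayerVisitorBase.hpp helper and its VisitorNoThrowPolicy are available in this release; the QuantizeLayerLogger class name is made up for illustration.

// Hypothetical visitor that only reacts to Quantize layers; every other Visit*
// callback falls through to the empty default supplied by VisitorNoThrowPolicy.
#include <armnn/LayerVisitorBase.hpp>
#include <iostream>

class QuantizeLayerLogger : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitQuantizeLayer(const armnn::IConnectableLayer* /*layer*/,
                            const char* name = nullptr) override
    {
        std::cout << "Visited Quantize layer: " << (name ? name : "<unnamed>") << "\n";
    }
};

// Usage: given the IConnectableLayer* quantize from the network-building sketch above,
//     QuantizeLayerLogger logger;
//     quantize->Accept(logger);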