//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizeLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/utility/IgnoreUnused.hpp> // provides IgnoreUnused()

namespace armnn
{

QuantizeLayer::QuantizeLayer(const char* name)
: Layer(1, 1, LayerType::Quantize, name)
{}

// Creates the backend-specific workload that performs the quantization.
std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    QuantizeQueueDescriptor descriptor;

    WorkloadInfo info = PrepInfoAndDesc(descriptor);

    return factory.CreateQuantize(descriptor, info);
}

Layer* QuantizeLayer::Clone(Graph& graph) const
{
    QuantizeLayer* clone = CloneBase<QuantizeLayer>(graph, GetName());
    return clone;
}

void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
{
    IgnoreUnused(shapeInferenceMethod);
    VerifyLayerConnections(1, CHECK_LOCATION());

    // The output shape is inferred directly from the single input's shape.
    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "QuantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}

void QuantizeLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitQuantizeLayer(this, GetName());
}

} //namespace armnn