return CloneBase<FakeQuantizationLayer>(graph,
m_Param,
GetName());
BOOST_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
boost::ignore_unused(visitor);
throw armnn::Exception(
"FakeQuantizationLayer should not appear in an input graph");
This layer represents a fake quantization operation.
const char * GetName() const override
void FakeQuantization(const float *inputData, float *outputData, uint32_t numElements, float min, float max)
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
virtual const TensorInfo & GetTensorInfo() const = 0
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
FakeQuantizationLayer * Clone(Graph &graph) const override
void Accept(ILayerVisitor &visitor) const override
FakeQuantizationDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
virtual std::unique_ptr< IWorkload > CreateFakeQuantization(const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
Base class for all ArmNN exceptions so that users can filter to just those.
void ValidateTensorShapesFromInputs() override
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
FakeQuantizationLayer(const FakeQuantizationDescriptor &descriptor, const char *name)
const TensorShape & GetShape() const
const TensorInfo & GetTensorInfo() const override
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
const InputSlot & GetInputSlot(unsigned int index) const override