ArmNN 22.11
QuantizeQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for QuantizeQueueDescriptor:
QueueDescriptor

Public Member Functions

void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptor
virtual ~QueueDescriptor ()=default
 
void ValidateTensorNumDimensions (const TensorInfo &tensor, std::string const &descName, unsigned int numDimensions, std::string const &tensorName) const
 
void ValidateTensorNumDimNumElem (const TensorInfo &tensorInfo, unsigned int numDimension, unsigned int numElements, std::string const &tensorName) const
 
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Additional Inherited Members

- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 
bool m_AllowExpandedDims = false
 
- Protected Member Functions inherited from QueueDescriptor
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 321 of file WorkloadData.hpp.
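
A minimal usage sketch, not part of the generated documentation: QuantizeQueueDescriptor carries the input/output tensor handles inherited from QueueDescriptor and adds only the Validate() check documented below. The function name, tensor shapes and quantization parameters here are illustrative assumptions, and the include paths may differ between the ArmNN source tree and installed headers.

#include <WorkloadData.hpp>   // QuantizeQueueDescriptor, WorkloadInfo
#include <armnn/Tensor.hpp>   // TensorInfo, TensorShape, DataType

using namespace armnn;

// Hypothetical helper: builds a valid quantize description and validates it.
void ValidateQuantizeDescriptorExample()
{
    // One Float32 input and one quantized (QAsymmU8) output, as Validate() requires.
    TensorInfo inputInfo (TensorShape({1, 2, 2, 1}), DataType::Float32);
    TensorInfo outputInfo(TensorShape({1, 2, 2, 1}), DataType::QAsymmU8, 0.1f, 128);

    WorkloadInfo workloadInfo;
    workloadInfo.m_InputTensorInfos  = { inputInfo };
    workloadInfo.m_OutputTensorInfos = { outputInfo };

    QuantizeQueueDescriptor descriptor;
    // m_Inputs / m_Outputs (ITensorHandle*) are normally filled in by the backend's
    // workload factory; Validate() only inspects the WorkloadInfo passed to it.

    descriptor.Validate(workloadInfo);   // throws InvalidArgumentException on failure
}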

Member Function Documentation

◆ Validate()

void Validate (const WorkloadInfo & workloadInfo) const

Definition at line 2485 of file WorkloadData.cpp.

References armnn::BFloat16, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IsQuantizedType(), WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

{
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo,  descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,
        DataType::QAsymmU8,
        DataType::QSymmS8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    if (!IsQuantizedType(outputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
    }
}
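For illustration only (assumed shapes and a hypothetical function name), the sketch below exercises the final check above: a non-quantized output data type makes Validate() throw InvalidArgumentException.

#include <WorkloadData.hpp>
#include <armnn/Exceptions.hpp>   // InvalidArgumentException
#include <armnn/Tensor.hpp>
#include <iostream>

using namespace armnn;

// Hypothetical example: Float32 is not a quantized type, so the last check fails.
void QuantizeValidateFailureExample()
{
    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo(TensorShape({4}), DataType::Float32) };
    info.m_OutputTensorInfos = { TensorInfo(TensorShape({4}), DataType::Float32) };

    QuantizeQueueDescriptor descriptor;
    try
    {
        descriptor.Validate(info);
    }
    catch (const InvalidArgumentException& e)
    {
        // Prints: "QuantizeQueueDescriptor: Output of quantized layer must be quantized type."
        std::cerr << e.what() << std::endl;
    }
}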

The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp