ArmNN 22.05
QuantizeQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for QuantizeQueueDescriptor: inherits QueueDescriptor.

Public Member Functions

void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptor
virtual ~QueueDescriptor ()=default
 
void ValidateTensorNumDimensions (const TensorInfo &tensor, std::string const &descName, unsigned int numDimensions, std::string const &tensorName) const
 
void ValidateTensorNumDimNumElem (const TensorInfo &tensorInfo, unsigned int numDimension, unsigned int numElements, std::string const &tensorName) const
 
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Additional Inherited Members

- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 
bool m_AllowExpandedDims = false
 
- Protected Member Functions inherited from QueueDescriptor
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 321 of file WorkloadData.hpp.
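
QuantizeQueueDescriptor adds no data members of its own: it inherits the input/output tensor handles and validation helpers from QueueDescriptor and overrides only Validate(). The sketch below is not taken from the ArmNN sources; the workload class name and the include paths are assumptions. It illustrates how a backend workload would typically copy the descriptor and check its handle counts with the inherited ValidateInputsOutputs():

#include <armnn/backends/WorkloadData.hpp>   // include path assumed; may differ between ArmNN versions
#include <armnn/backends/WorkloadInfo.hpp>   // include path assumed

using namespace armnn;

// Hypothetical backend workload; only its use of the descriptor matters here.
class MyQuantizeWorkload
{
public:
    MyQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
        : m_Data(descriptor)
    {
        // Quantize consumes exactly one input handle and produces one output handle.
        m_Data.ValidateInputsOutputs("MyQuantizeWorkload", 1, 1);
        (void) info; // a real workload would use the tensor infos to configure its kernel
    }

private:
    QuantizeQueueDescriptor m_Data;
};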

Member Function Documentation

◆ Validate()

void Validate ( const WorkloadInfo & workloadInfo ) const

Definition at line 2532 of file WorkloadData.cpp.

References armnn::BFloat16, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IsQuantizedType(), WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

2533 {
2534  const std::string descriptorName{"QuantizeQueueDescriptor"};
2535 
2536  ValidateNumInputs(workloadInfo, descriptorName, 1);
2537  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2538 
2539  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2540  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2541 
2542  std::vector<DataType> supportedTypes =
2543  {
2544      DataType::BFloat16,
2545      DataType::Float16,
2546      DataType::Float32,
2547      DataType::QAsymmS8,
2548      DataType::QAsymmU8,
2549      DataType::QSymmS8,
2550      DataType::QSymmS16
2551  };
2552 
2553  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2554 
2555  if (!IsQuantizedType(outputTensorInfo.GetDataType()))
2556  {
2557  throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
2558  }
2559 }
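Validate() checks that there is exactly one input and one output, that the input data type is one of the supported types, and that the output type is quantized. The following is a minimal sketch of a call that passes; the include paths, tensor shape, and quantization parameters are assumptions, not part of the documented API:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/backends/WorkloadData.hpp>   // include path assumed; may differ between ArmNN versions
#include <armnn/backends/WorkloadInfo.hpp>   // include path assumed

using namespace armnn;

int main()
{
    // Float32 input and QAsymmU8 output: a combination Validate() accepts.
    TensorInfo inputInfo ({ 1, 2, 2, 3 }, DataType::Float32);
    TensorInfo outputInfo({ 1, 2, 2, 3 }, DataType::QAsymmU8,
                          /*quantizationScale*/ 0.5f, /*quantizationOffset*/ 128);

    WorkloadInfo info;
    info.m_InputTensorInfos  = { inputInfo };
    info.m_OutputTensorInfos = { outputInfo };

    QuantizeQueueDescriptor descriptor;
    descriptor.Validate(info);   // would throw InvalidArgumentException if the output were not a quantized type

    return 0;
}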

The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp