ArmNN 22.11
FullyConnectedQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for FullyConnectedQueueDescriptor:
FullyConnectedQueueDescriptor → QueueDescriptorWithParameters< FullyConnectedDescriptor > → QueueDescriptor

Public Member Functions

 FullyConnectedQueueDescriptor ()
 
void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >
virtual ~QueueDescriptorWithParameters ()=default
 
- Public Member Functions inherited from QueueDescriptor
virtual ~QueueDescriptor ()=default
 
void ValidateTensorNumDimensions (const TensorInfo &tensor, std::string const &descName, unsigned int numDimensions, std::string const &tensorName) const
 
void ValidateTensorNumDimNumElem (const TensorInfo &tensorInfo, unsigned int numDimension, unsigned int numElements, std::string const &tensorName) const
 
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 
template<typename T >
const T * GetAdditionalInformation () const
 

Public Attributes

const ConstTensorHandle * m_Weight
 
const ConstTensorHandle * m_Bias
 
- Public Attributes inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >
FullyConnectedDescriptor m_Parameters
 
- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 
void * m_AdditionalInfoObject
 
bool m_AllowExpandedDims = false
 

Additional Inherited Members

- Protected Member Functions inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >
 QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
 
QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default
 
- Protected Member Functions inherited from QueueDescriptor
 QueueDescriptor ()
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 180 of file WorkloadData.hpp.
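
FullyConnectedQueueDescriptor carries everything a backend needs to create a fully connected (dense) workload: the layer parameters in the inherited m_Parameters, optional constant weight and bias handles, and the input/output tensor handles inherited from QueueDescriptor. The following is a minimal sketch of how such a descriptor might be populated; it is illustrative only and not taken from the ArmNN sources, and the helper name, handle parameters, and include paths are assumptions based on the 22.11 layout.

#include <armnn/backends/TensorHandle.hpp>    // ConstTensorHandle, ITensorHandle (path assumed)
#include <armnn/backends/WorkloadData.hpp>    // FullyConnectedQueueDescriptor (path assumed)

// Hypothetical helper: fill in the descriptor before the workload is created.
armnn::FullyConnectedQueueDescriptor
MakeFullyConnectedDescriptor(const armnn::ConstTensorHandle& weights,
                             const armnn::ConstTensorHandle& bias,
                             armnn::ITensorHandle* input,
                             armnn::ITensorHandle* output)
{
    armnn::FullyConnectedQueueDescriptor descriptor;

    descriptor.m_Parameters.m_BiasEnabled = true;   // FullyConnectedDescriptor, via inherited m_Parameters
    descriptor.m_Weight = &weights;                 // const ConstTensorHandle* holding the weights
    descriptor.m_Bias   = &bias;                    // const ConstTensorHandle* holding the bias

    descriptor.m_Inputs.push_back(input);           // ITensorHandle*, inherited from QueueDescriptor
    descriptor.m_Outputs.push_back(output);         // ITensorHandle*, inherited from QueueDescriptor

    return descriptor;
}

Validate() itself inspects only the WorkloadInfo passed to it, as shown in the Member Function Documentation below.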

Constructor & Destructor Documentation

◆ FullyConnectedQueueDescriptor()

Definition at line 182 of file WorkloadData.hpp.

FullyConnectedQueueDescriptor()
    : m_Weight(nullptr)
    , m_Bias(nullptr)
{
}

Member Function Documentation

◆ Validate()

void Validate (const WorkloadInfo &workloadInfo) const

Definition at line 1062 of file WorkloadData.cpp.

References armnn::BFloat16, armnn::Float16, armnn::Float32, armnn::GetBiasDataType(), TensorInfo::GetDataType(), TensorInfo::GetNumDimensions(), WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and QueueDescriptor::ValidateTensorNumDimensions().

void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    uint32_t numInputs = 2;
    if (m_Parameters.m_BiasEnabled)
    {
        numInputs = 3;
    }

    ValidateNumInputs(workloadInfo, descriptorName, numInputs);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
        // Validates type and quantization values.
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QAsymmS8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
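
Validate() checks only the tensor metadata in the WorkloadInfo, not the tensor handles, and throws armnn::InvalidArgumentException when any of the checks above fail. Below is a minimal, self-contained sketch of calling it for a bias-free fully connected workload; the shapes, the function name, and the include paths are assumptions for illustration.

#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadData.hpp>   // include paths assumed from the 22.11 layout
#include <armnn/backends/WorkloadInfo.hpp>

void CheckFullyConnectedWorkload()
{
    armnn::FullyConnectedQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BiasEnabled = false;   // only input + weights expected (numInputs == 2)

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos  = { armnn::TensorInfo({1, 32},  armnn::DataType::Float32),    // input   [batch, inputSize]
                                 armnn::TensorInfo({16, 32}, armnn::DataType::Float32) };  // weights [outputSize, inputSize]
    info.m_OutputTensorInfos = { armnn::TensorInfo({1, 16},  armnn::DataType::Float32) };  // output  [batch, outputSize]

    // Throws armnn::InvalidArgumentException if, for example, the output is not 2D,
    // the input is not 2D/4D, or the input data type is not in the supported list.
    descriptor.Validate(info);
}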

Member Data Documentation

◆ m_Bias

const ConstTensorHandle * FullyConnectedQueueDescriptor::m_Bias

◆ m_Weight

const ConstTensorHandle * FullyConnectedQueueDescriptor::m_Weight

The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp