From 8efb48a6847c5cd166c561127ae6611150963ce3 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Fri, 19 May 2023 11:14:28 +0100
Subject: Update Doxygen docu for 23.05

Signed-off-by: Nikhil Raj
Change-Id: I0a992286f14fa68fcc6e5eba31ac39fed003cbbe
---
 ...rmnn_1_1_fully_connected_queue_descriptor.xhtml | 288 +++++++++++++++++++++
 1 file changed, 288 insertions(+)
 create mode 100644 23.05/structarmnn_1_1_fully_connected_queue_descriptor.xhtml

diff --git a/23.05/structarmnn_1_1_fully_connected_queue_descriptor.xhtml b/23.05/structarmnn_1_1_fully_connected_queue_descriptor.xhtml
new file mode 100644
index 0000000000..9e9f3ade6c
--- /dev/null
+++ b/23.05/structarmnn_1_1_fully_connected_queue_descriptor.xhtml
@@ -0,0 +1,288 @@
FullyConnectedQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for FullyConnectedQueueDescriptor:

  QueueDescriptor
   └─ QueueDescriptorWithParameters< FullyConnectedDescriptor >
       └─ FullyConnectedQueueDescriptor

Public Member Functions

  void Validate (const WorkloadInfo &workloadInfo) const

Public Member Functions inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >

  virtual ~QueueDescriptorWithParameters ()=default

Public Member Functions inherited from QueueDescriptor

  virtual ~QueueDescriptor ()=default
  void ValidateTensorNumDimensions (const TensorInfo &tensor, std::string const &descName, unsigned int numDimensions, std::string const &tensorName) const
  void ValidateTensorNumDimNumElem (const TensorInfo &tensorInfo, unsigned int numDimension, unsigned int numElements, std::string const &tensorName) const
  void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
  template<typename T > const T * GetAdditionalInformation () const
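These inherited helpers are what backend workloads typically call before doing any work. The sketch below is illustrative only: the include paths follow the 23.05 source layout, and treating the optional additional-information payload as an ActivationDescriptor is an assumption, not something this page prescribes.

// Illustrative sketch, not taken from the ArmNN sources: how a backend workload might
// use the inherited QueueDescriptor helpers. Any type stored in m_AdditionalInfoObject
// could be retrieved the same way as the assumed ActivationDescriptor here.
#include <armnn/Descriptors.hpp>
#include <armnn/backends/WorkloadData.hpp>

void CheckFullyConnectedDescriptor(const armnn::FullyConnectedQueueDescriptor& descriptor)
{
    // Two inputs (input, weights), or three when a bias tensor is supplied.
    const unsigned int expectedInputs = descriptor.m_Parameters.m_BiasEnabled ? 3u : 2u;
    descriptor.ValidateInputsOutputs("FullyConnectedWorkload", expectedInputs, 1u);

    // Optional extra payload attached via m_AdditionalInfoObject; nullptr when absent.
    const armnn::ActivationDescriptor* fusedActivation =
        descriptor.GetAdditionalInformation<armnn::ActivationDescriptor>();
    if (fusedActivation != nullptr)
    {
        // A real workload would configure the fused activation here.
    }
}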
 
Additional Inherited Members

Public Attributes inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >

  FullyConnectedDescriptor m_Parameters

Public Attributes inherited from QueueDescriptor

  std::vector< ITensorHandle * > m_Inputs
  std::vector< ITensorHandle * > m_Outputs
  void * m_AdditionalInfoObject
  bool m_AllowExpandedDims = false

Protected Member Functions inherited from QueueDescriptorWithParameters< FullyConnectedDescriptor >

  QueueDescriptorWithParameters ()=default
  QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
  QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default

Protected Member Functions inherited from QueueDescriptor

  QueueDescriptor ()
  QueueDescriptor (QueueDescriptor const &)=default
  QueueDescriptor & operator= (QueueDescriptor const &)=default
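Together, m_Parameters, m_Inputs and m_Outputs carry everything a workload needs, with a parallel WorkloadInfo describing the shapes and data types of those tensor handles. The following is a minimal sketch under assumed shapes, types and include paths; the values are chosen for illustration and are not taken from the ArmNN tests.

// Minimal sketch with assumed shapes/types: build the WorkloadInfo that mirrors a
// FullyConnectedQueueDescriptor's inputs/outputs and run Validate() against it.
#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadData.hpp>

void ValidateFullyConnectedExample()
{
    using namespace armnn;

    FullyConnectedQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BiasEnabled = false;   // weights only, so two inputs are expected

    // One TensorInfo per tensor handle that would sit in m_Inputs / m_Outputs.
    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo({ 1, 8 }, DataType::Float32),    // 2D input
                                 TensorInfo({ 8, 4 }, DataType::Float32) };  // 2D weights
    info.m_OutputTensorInfos = { TensorInfo({ 1, 4 }, DataType::Float32) };  // 2D output

    descriptor.Validate(info);  // throws InvalidArgumentException on a mismatch
}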
 
Detailed Description

Definition at line 180 of file WorkloadData.hpp.
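In other words, the declaration at line 180 of WorkloadData.hpp adds only Validate() on top of the inherited machinery. The sketch below is paraphrased from this page rather than copied from the header:

// Paraphrased from this reference page, not copied from WorkloadData.hpp.
struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};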

Member Function Documentation

◆ Validate()

  void Validate (const WorkloadInfo &workloadInfo) const

Definition at line 1059 of file WorkloadData.cpp.
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    uint32_t numInputs = 2;
    if (m_Parameters.m_BiasEnabled)
    {
        numInputs = 3;
    }

    ValidateNumInputs(workloadInfo, descriptorName, numInputs);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
        // Validates type and quantization values.
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmU8,
        DataType::QAsymmS8,
        DataType::QSymmS16
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
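To make the dimension check above concrete, the sketch below feeds Validate() a 3D input tensor; the shapes and the standalone main() are illustrative assumptions rather than ArmNN test code:

// Illustration with assumed shapes: a 3D input violates the "2 or 4 dimensions"
// rule enforced by Validate() and surfaces as an InvalidArgumentException.
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <iostream>

int main()
{
    using namespace armnn;

    FullyConnectedQueueDescriptor descriptor;        // m_BiasEnabled defaults to false: two inputs
    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo({ 1, 2, 8 }, DataType::Float32),   // 3D input: rejected
                                 TensorInfo({ 8, 4 },    DataType::Float32) }; // 2D weights
    info.m_OutputTensorInfos = { TensorInfo({ 1, 4 },    DataType::Float32) };

    try
    {
        descriptor.Validate(info);
    }
    catch (const InvalidArgumentException& e)
    {
        std::cout << e.what() << std::endl;   // "...Input tensor must have 2 or 4 dimensions."
    }
    return 0;
}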

References armnn::BFloat16, armnn::Float16, armnn::Float32, armnn::GetBiasDataType(), TensorInfo::GetDataType(), TensorInfo::GetNumDimensions(), FullyConnectedDescriptor::m_BiasEnabled, WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< FullyConnectedDescriptor >::m_Parameters, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and QueueDescriptor::ValidateTensorNumDimensions().

The documentation for this struct was generated from the following files:

  WorkloadData.hpp
  WorkloadData.cpp