ArmNN 20.05
Convolution2dQueueDescriptor Struct Reference

#include <WorkloadData.hpp>

Inheritance diagram for Convolution2dQueueDescriptor:
Convolution2dQueueDescriptor inherits QueueDescriptorWithParameters< Convolution2dDescriptor >, which in turn inherits QueueDescriptor.

Public Member Functions

 Convolution2dQueueDescriptor ()
 
void Validate (const WorkloadInfo &workloadInfo) const
 
- Public Member Functions inherited from QueueDescriptor
void ValidateInputsOutputs (const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
 

Public Attributes

const ConstCpuTensorHandle * m_Weight
 
const ConstCpuTensorHandle * m_Bias
 
- Public Attributes inherited from QueueDescriptorWithParameters< Convolution2dDescriptor >
Convolution2dDescriptor m_Parameters
 
- Public Attributes inherited from QueueDescriptor
std::vector< ITensorHandle * > m_Inputs
 
std::vector< ITensorHandle * > m_Outputs
 

Additional Inherited Members

- Protected Member Functions inherited from QueueDescriptorWithParameters< Convolution2dDescriptor >
 ~QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters ()=default
 
 QueueDescriptorWithParameters (QueueDescriptorWithParameters const &)=default
 
QueueDescriptorWithParameters & operator= (QueueDescriptorWithParameters const &)=default
 
- Protected Member Functions inherited from QueueDescriptor
 ~QueueDescriptor ()=default
 
 QueueDescriptor ()=default
 
 QueueDescriptor (QueueDescriptor const &)=default
 
QueueDescriptor & operator= (QueueDescriptor const &)=default
 

Detailed Description

Definition at line 168 of file WorkloadData.hpp.

Constructor & Destructor Documentation

◆ Convolution2dQueueDescriptor()

Definition at line 170 of file WorkloadData.hpp.

170  Convolution2dQueueDescriptor()
171      : m_Weight(nullptr)
172      , m_Bias(nullptr)
173  {
174  }
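The constructor only null-initialises the weight and bias handles; the caller fills in m_Weight, m_Bias and m_Parameters, and supplies matching tensor infos through a WorkloadInfo, before Validate() is called. The sketch below illustrates that flow under assumed conditions: the shapes, the use of ScopedCpuTensorHandle (a ConstCpuTensorHandle subclass) and the include paths are illustrative choices, not mandated by this reference.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>   // include paths assume the in-tree backendsCommon layout
#include <backendsCommon/WorkloadData.hpp>

using namespace armnn;

void SketchConvolution2dDescriptorSetup()   // hypothetical helper, for illustration only
{
    // Assumed weight/bias shapes: 8 output channels, 3 input channels, 3x3 kernel, NCHW layout.
    TensorInfo weightInfo({ 8, 3, 3, 3 }, DataType::Float32);
    TensorInfo biasInfo({ 8 }, DataType::Float32);
    ScopedCpuTensorHandle weightHandle(weightInfo);   // ScopedCpuTensorHandle derives from ConstCpuTensorHandle
    ScopedCpuTensorHandle biasHandle(biasInfo);

    Convolution2dQueueDescriptor descriptor;
    descriptor.m_Weight = &weightHandle;              // Validate() rejects a null weight handle
    descriptor.m_Bias   = &biasHandle;                // only checked when m_BiasEnabled is true
    descriptor.m_Parameters.m_BiasEnabled = true;
    descriptor.m_Parameters.m_StrideX = 1;
    descriptor.m_Parameters.m_StrideY = 1;
    // m_Inputs/m_Outputs would hold backend ITensorHandle pointers at execution time;
    // Validate() only inspects the WorkloadInfo passed to it.

    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo({ 1, 3, 16, 16 }, DataType::Float32) };
    info.m_OutputTensorInfos = { TensorInfo({ 1, 8, 14, 14 }, DataType::Float32) };

    descriptor.Validate(info);                        // throws InvalidArgumentException on any violation
}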

Member Function Documentation

◆ Validate()

void Validate ( const WorkloadInfo & workloadInfo ) const

Definition at line 1177 of file WorkloadData.cpp.

References armnn::BFloat16, armnn::Float16, armnn::Float32, armnn::GetBiasDataType(), TensorInfo::GetDataType(), WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by BOOST_AUTO_TEST_CASE().

1178 {
1179  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1180 
1181  ValidateNumInputs(workloadInfo, descriptorName, 1);
1182  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1183 
1184  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1185  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1186 
1187  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1188  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1189 
1190  ValidatePointer(m_Weight, descriptorName, "weight");
1191 
1192  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1193  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1194 
1195  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1196 
1197  Optional<TensorInfo> optionalBiasTensorInfo;
1198  if (m_Parameters.m_BiasEnabled)
1199  {
1200  ValidatePointer(m_Bias, descriptorName, "bias");
1201 
1202  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1203  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1204 
1205  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1206  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1207  }
1208 
1209  ValidatePerAxisQuantization(inputTensorInfo,
1210  outputTensorInfo,
1211  weightTensorInfo,
1212  optionalBiasTensorInfo,
1213  descriptorName);
1214 
1215  std::vector<DataType> supportedTypes =
1216  {
1217      DataType::BFloat16,
1218      DataType::Float16,
1219      DataType::Float32,
1220      DataType::QAsymmU8,
1221      DataType::QAsymmS8,
1222      DataType::QSymmS16,
1223      DataType::QSymmS8
1224  };
1225 
1226  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1227 
1228  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1229  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1230  {
1231  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1232  {
1233  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1234  "for BFloat16 input.");
1235  }
1236  }
1237  else
1238  {
1239  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1240  }
1241 }
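The final branch above is the only case where the input and output data types may differ: a BFloat16 input may be paired with a Float32 output. The sketch below illustrates this under assumed conditions; the shapes, handles and include paths are illustrative, as in the earlier constructor example.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>   // include paths assume the in-tree backendsCommon layout
#include <backendsCommon/WorkloadData.hpp>

using namespace armnn;

void SketchBFloat16Convolution2dValidation()   // hypothetical helper, for illustration only
{
    // Assumed 1x1 convolution with 4 output channels; bias left disabled (m_BiasEnabled defaults to false).
    TensorInfo weightInfo({ 4, 3, 1, 1 }, DataType::BFloat16);
    ScopedCpuTensorHandle weightHandle(weightInfo);

    Convolution2dQueueDescriptor descriptor;
    descriptor.m_Weight = &weightHandle;
    descriptor.m_Parameters.m_StrideX = 1;
    descriptor.m_Parameters.m_StrideY = 1;

    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo({ 1, 3, 8, 8 }, DataType::BFloat16) };
    info.m_OutputTensorInfos = { TensorInfo({ 1, 4, 8, 8 }, DataType::Float32) };

    descriptor.Validate(info);   // accepted: BFloat16 input with Float32 output

    info.m_OutputTensorInfos = { TensorInfo({ 1, 4, 8, 8 }, DataType::Float16) };
    // descriptor.Validate(info);   // would throw: the output must be BFloat16 or Float32 for a BFloat16 input
}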

Member Data Documentation

◆ m_Bias

const ConstCpuTensorHandle * m_Bias

◆ m_Weight

const ConstCpuTensorHandle * m_Weight


The documentation for this struct was generated from the following files:

WorkloadData.hpp
WorkloadData.cpp