ArmNN
 22.05
FullyConnectedTestImpl.cpp File Reference


Functions

template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo, std::vector< T > &weights, std::vector< B > &bias, std::vector< T > &input, bool biasEnabled, bool transposeWeights, bool constantWeights)
 
template<armnn::DataType ArmnnType, typename T >
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 280 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::m_ExpectedData, and armnn::swap().

Referenced by TEST_SUITE().

286 {
287  unsigned int inputWidth = 1;
288  unsigned int inputHeight = 1;
289  unsigned int inputChannels = 5;
290  unsigned int inputNum = 2;
291 
292  unsigned int outputChannels = 3;
293  unsigned int outputNum = 2;
294 
295  // Define the tensor descriptors.
296  armnn::TensorInfo inputTensorInfo;
297  armnn::TensorInfo outputTensorInfo;
298  armnn::TensorInfo weightsDesc;
299  armnn::TensorInfo biasesDesc;
300 
301  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
302  unsigned int outputShape[] = { outputNum, outputChannels };
303  unsigned int weightsShape[] = { inputChannels, outputChannels };
304 
305  if (transposeWeights)
306  {
307  std::swap(weightsShape[0], weightsShape[1]);
308  }
309 
310  unsigned int biasShape[] = { outputChannels };
311 
312  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
313  outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
314  weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
315  biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
316 
317  LayerTestResult<float, 2> result(outputTensorInfo);
318 
319  std::vector<float> input =
320  {
321  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
322  5.0f, 4.0f, 3.0f, 2.0f, 1.0f
323  };
324 
325  std::vector<float> weights =
326  {
327  .5f, 2.f, .5f,
328  .5f, 2.f, 1.f,
329  .5f, 2.f, 2.f,
330  .5f, 2.f, 3.f,
331  .5f, 2.f, 4.f
332  };
333 
334  if (transposeWeights)
335  {
336  weights =
337  {
338  .5f, .5f, .5f, .5f, .5f,
339  2.f, 2.f, 2.f, 2.f, 2.f,
340  .5f, 1.f, 2.f, 3.f, 4.f
341  };
342  }
343 
344  std::vector<float> biasValues({0.f, 0.f, 0.f});
345  if (biasEnabled)
346  {
347  biasValues = std::vector<float>({10.f, 20.f, 30.f});
348  }
349 
350  result = SimpleFullyConnectedTestImpl<float>(
351  workloadFactory,
352  memoryManager,
353  tensorHandleFactory,
354  inputTensorInfo, outputTensorInfo,
355  weightsDesc, biasesDesc,
356  weights, biasValues, input,
357  biasEnabled, transposeWeights, true
358  );
359 
360  std::vector<float> expectedOutput =
361  {
362  0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
363  2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
364  0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
365 
366  2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
367  10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
368  2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
369  };
370  result.m_ExpectedData = expectedOutput;
371 
372  return result;
373 }
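The expected values above are simply the dense product of the 2×5 input with the 5×3 weight matrix, plus the per-channel bias. As a cross-check, here is a minimal standalone sketch (plain C++, independent of the Arm NN API; the loop structure is illustrative, not the backend implementation) that reproduces the reference computation:

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const std::size_t batch = 2, inputSize = 5, outputSize = 3;
    // Same data as the test: input is [batch, inputSize], weights are
    // [inputSize, outputSize] (non-transposed layout), bias is [outputSize].
    std::vector<float> input   = { 1.f, 2.f, 3.f, 4.f, 5.f,
                                   5.f, 4.f, 3.f, 2.f, 1.f };
    std::vector<float> weights = { .5f, 2.f, .5f,
                                   .5f, 2.f, 1.f,
                                   .5f, 2.f, 2.f,
                                   .5f, 2.f, 3.f,
                                   .5f, 2.f, 4.f };
    std::vector<float> bias    = { 10.f, 20.f, 30.f };

    std::vector<float> output(batch * outputSize, 0.f);
    for (std::size_t n = 0; n < batch; ++n)
    {
        for (std::size_t o = 0; o < outputSize; ++o)
        {
            float acc = bias[o];
            for (std::size_t i = 0; i < inputSize; ++i)
            {
                acc += input[n * inputSize + i] * weights[i * outputSize + o];
            }
            output[n * outputSize + o] = acc;
        }
    }

    // First output channel of each batch, matching the m_ExpectedData
    // sums in the listing above (biasEnabled == true case).
    assert(output[0] == 0.5f + 1.0f + 1.5f + 2.0f + 2.5f + 10.f);
    assert(output[3] == 2.5f + 2.0f + 1.5f + 1.0f + 0.5f + 10.f);
    return 0;
}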

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  transposeWeights 
)

Definition at line 375 of file FullyConnectedTestImpl.cpp.

Referenced by TEST_SUITE().

380 {
381  return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
382  memoryManager,
383  tensorHandleFactory,
384  transposeWeights);
385 }

◆ FullyConnectedLargeTestCommon()

LayerTestResult<T, 2> FullyConnectedLargeTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  transposeWeights,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 179 of file FullyConnectedTestImpl.cpp.

References LayerTestResult< T, n >::m_ExpectedData, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::swap().

186 {
187  unsigned int inputWidth = 1;
188  unsigned int inputHeight = 1;
189  unsigned int inputChannels = 5;
190  unsigned int inputNum = 1;
191 
192  unsigned int outputChannels = 1;
193  unsigned int outputNum = 1;
194 
195  // Define the tensor descriptors.
196  armnn::TensorInfo inputTensorInfo;
197  armnn::TensorInfo outputTensorInfo;
198  armnn::TensorInfo weightsDesc;
199  armnn::TensorInfo biasesDesc;
200 
201  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
202  unsigned int outputShape[] = { outputNum, outputChannels };
203  unsigned int weightsShape[] = { inputChannels, outputChannels };
204  if (transposeWeights)
205  {
206  std::swap(weightsShape[0], weightsShape[1]);
207  }
208 
209  unsigned int biasShape[] = { outputChannels };
210 
211  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
212  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
213  weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
214  biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
215 
216  // Set quantization parameters if the requested type is a quantized type.
217  if(armnn::IsQuantizedType<T>())
218  {
219  inputTensorInfo.SetQuantizationScale(qScale);
220  inputTensorInfo.SetQuantizationOffset(qOffset);
221  outputTensorInfo.SetQuantizationScale(qScale);
222  outputTensorInfo.SetQuantizationOffset(qOffset);
223  }
224 
225  LayerTestResult<T, 2> result(outputTensorInfo);
226 
227  std::vector<T> input = armnnUtils::QuantizedVector<T>(
228  {
229  1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
230  },
231  qScale, qOffset);
232 
233  std::vector<T> weights = armnnUtils::QuantizedVector<T>(
234  {
235  2.0f, 3.0f, 4.0f, 5.0f, 6.0f
236  },
237  qScale, qOffset);
238 
239  std::vector<T> biasValues({900000.f});
240 
241  result = SimpleFullyConnectedTestImpl<T>(
242  workloadFactory,
243  memoryManager,
244  tensorHandleFactory,
245  inputTensorInfo, outputTensorInfo,
246  weightsDesc, biasesDesc,
247  weights, biasValues, input,
248  true, transposeWeights, true
249  );
250 
251  result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
252 
253  return result;
254 }
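For reference, the expected value is the dot product 1·2 + 10·3 + 100·4 + 1000·5 + 10000·6 = 65432 plus the bias of 900000, giving 965432; the widely spread input magnitudes make this a useful check of accumulator precision.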

◆ FullyConnectedTest()

LayerTestResult<T, 2> FullyConnectedTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constantWeights 
)

Definition at line 97 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::m_ExpectedData, and TensorInfo::SetQuantizationScale().

103 {
104  constexpr static unsigned int inputWidth = 3u;
105  constexpr static unsigned int inputHeight = 2u;
106  constexpr static unsigned int inputChannels = 1u;
107 
108  constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
109 
110  constexpr static unsigned int outputChannels = 2u;
111 
112  armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
113  inputTensorInfo.SetQuantizationScale(0.1f);
114  inputTensorInfo.SetQuantizationOffset(63);
115 
116  armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
117  outputTensorInfo.SetQuantizationScale(5.f);
118  outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
119 
120  armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
121  weightsDesc.SetQuantizationScale(0.2f);
122  weightsDesc.SetQuantizationOffset(93);
123 
124  armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
125  biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
126  biasesDesc.SetQuantizationOffset(0);
127 
128  LayerTestResult<T, 2> result(outputTensorInfo);
129 
130  std::vector<T> input = ConvertToDataType<ArmnnType>(
131  {
132  -1.2f, 6.1f, -3.5f,
133  18.8f, -5.5f, 2.9f
134  },
135  inputTensorInfo);
136 
137  std::vector<T> weights = ConvertToDataType<ArmnnType>(
138  {
139  -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
140  23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
141  },
142  weightsDesc);
143 
144  std::vector<int32_t> bias = {9250, 67500};
145 
146  result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
147  memoryManager,
148  tensorHandleFactory,
149  inputTensorInfo,
150  outputTensorInfo,
151  weightsDesc,
152  biasesDesc,
153  weights,
154  bias,
155  input,
156  biasEnabled,
157  true,
158  constantWeights);
159 
160  if (biasEnabled)
161  {
162  result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
163  }
164  else
165  {
166  result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
167  }
168 
169  return result;
170 }
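Note that the bias tensor's scale is set to inputScale × weightScale (0.1 × 0.2 = 0.02), the usual constraint for adding an int32 bias inside an integer GEMM. As an illustration, here is a standalone sketch (plain C++; QuantizeU8 is a hypothetical helper, not an Arm NN function) of the affine quantization used on the QAsymmU8 path:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Affine quantization: q = clamp(round(v / scale) + offset, 0, 255).
std::uint8_t QuantizeU8(float v, float scale, std::int32_t offset)
{
    std::int32_t q = static_cast<std::int32_t>(std::round(v / scale)) + offset;
    return static_cast<std::uint8_t>(std::min(255, std::max(0, q)));
}

int main()
{
    // Input parameters from the test: scale 0.1, offset 63.
    // -1.2f maps to round(-1.2 / 0.1) + 63 = -12 + 63 = 51.
    std::printf("%u\n", static_cast<unsigned>(QuantizeU8(-1.2f, 0.1f, 63)));

    // The bias uses scale = inputScale * weightScale = 0.1 * 0.2 = 0.02 and
    // offset 0, so the int32 bias value 9250 represents 9250 * 0.02 = 185.0f.
    return 0;
}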

◆ FullyConnectedTest< armnn::DataType::QAsymmU8 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2> FullyConnectedTest< armnn::DataType::QAsymmU8 > ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constWeights 
)

◆ FullyConnectedTest< armnn::DataType::QSymmS16 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2> FullyConnectedTest< armnn::DataType::QSymmS16 > ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constWeights 
)

◆ SimpleFullyConnectedTestImpl()

LayerTestResult<T, 2> SimpleFullyConnectedTestImpl ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
armnn::TensorInfo  inputTensorInfo,
armnn::TensorInfo  outputTensorInfo,
armnn::TensorInfo  weightsTensorInfo,
armnn::TensorInfo  biasesTensorInfo,
std::vector< T > &  weights,
std::vector< B > &  bias,
std::vector< T > &  input,
bool  biasEnabled,
bool  transposeWeights,
bool  constantWeights 
)

Definition at line 24 of file FullyConnectedTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::FullyConnected, TensorInfo::GetNumElements(), and LayerTestResult< T, n >::m_ActualData.

38 {
39  std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
40  std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
41  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
42 
43  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
44 
45  armnn::FullyConnectedQueueDescriptor data;
46  armnn::WorkloadInfo info;
47  armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
48  armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
49 
50  AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
51  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
52 
53  AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
54  AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
55  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
56 
57  // These must be set explicitly: the layer members are still null when the workload is created, because optimization hasn't been run.
58  data.m_Weight = &weightsTensor;
59  data.m_Bias = &biasTensor;
60 
61  data.m_Parameters.m_BiasEnabled = biasEnabled;
62  data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
63  data.m_Parameters.m_ConstantWeights = constantWeights;
64 
65  std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
66  if (biasEnabled)
67  {
68  input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
69  AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
70  }
71 
72  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FullyConnected,
73  data,
74  info);
75  LayerTestResult<T, 2> result(outputTensorInfo);
76 
77  input0Handle->Allocate();
78  input1Handle->Allocate();
79  outputHandle->Allocate();
80  CopyDataToITensorHandle(input0Handle.get(), input.data());
81  CopyDataToITensorHandle(input1Handle.get(), weights.data());
82  if (biasEnabled)
83  {
84  input2Handle->Allocate();
85  CopyDataToITensorHandle(input2Handle.get(), bias.data());
86  }
87 
88  ExecuteWorkload(*workload, memoryManager);
89 
90  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
91  result.m_ActualData = actualOutput;
92 
93  return result;
94 }
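Both public callers route their transposeWeights argument into data.m_Parameters.m_TransposeWeightMatrix above. The flag only changes the storage layout of the weight matrix, not the result: in these tests, non-transposed weights are laid out [inputSize, outputSize] and transposed weights [outputSize, inputSize]. A minimal standalone sketch (plain C++, independent of Arm NN; WeightAt is an illustrative helper) showing that the two weight buffers from FullyConnectedFloat32Test describe the same matrix:

#include <cassert>
#include <cstddef>
#include <vector>

// Weight lookup for the two layouts accepted by the fully connected layer:
// non-transposed weights are stored [inputSize, outputSize], transposed
// weights [outputSize, inputSize]. W(i, o) is the same value either way.
float WeightAt(const std::vector<float>& w, std::size_t i, std::size_t o,
               std::size_t inputSize, std::size_t outputSize, bool transposed)
{
    return transposed ? w[o * inputSize + i] : w[i * outputSize + o];
}

int main()
{
    const std::size_t inputSize = 5, outputSize = 3;
    // The two weight buffers from FullyConnectedFloat32Test above.
    std::vector<float> plain      = { .5f, 2.f, .5f,
                                      .5f, 2.f, 1.f,
                                      .5f, 2.f, 2.f,
                                      .5f, 2.f, 3.f,
                                      .5f, 2.f, 4.f };
    std::vector<float> transposed = { .5f, .5f, .5f, .5f, .5f,
                                      2.f, 2.f, 2.f, 2.f, 2.f,
                                      .5f, 1.f, 2.f, 3.f, 4.f };
    for (std::size_t i = 0; i < inputSize; ++i)
    {
        for (std::size_t o = 0; o < outputSize; ++o)
        {
            assert(WeightAt(plain, i, o, inputSize, outputSize, false)
                == WeightAt(transposed, i, o, inputSize, outputSize, true));
        }
    }
    return 0;
}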