ArmNN 21.05
FullyConnectedTestImpl.hpp File Reference


Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
 

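In ArmNN's own backend unit tests these helpers are normally registered through the ARMNN_AUTO_TEST_CASE family of macros rather than called directly. For orientation, a direct call might look like the sketch below; the reference-backend types (RefWorkloadFactory, RefMemoryManager, RefTensorHandleFactory), their constructors, and the include locations are assumptions and have not been verified against the 21.05 tree.

    // Sketch only: driving FullyConnectedFloat32Test against the reference backend.
    // The Ref* types, constructors, and include paths are assumptions, not verified API.
    #include <memory>
    // #include "FullyConnectedTestImpl.hpp"        // this file
    // #include <reference/RefWorkloadFactory.hpp>  // assumed location of the Ref* types

    void RunFullyConnectedFloat32()
    {
        armnn::RefWorkloadFactory workloadFactory;
        auto memoryManager = std::make_shared<armnn::RefMemoryManager>();
        armnn::RefTensorHandleFactory tensorHandleFactory(memoryManager);

        auto result = FullyConnectedFloat32Test(workloadFactory, memoryManager, tensorHandleFactory,
                                                /*biasEnabled=*/true, /*transposeWeights=*/false);

        // A test harness would then compare result.output against result.outputExpected.
    }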
Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 334 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::outputExpected, and armnn::swap().

{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}
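The expected values at the end of the listing are simply the matrix product input × weights plus the bias. The following standalone sketch (not ArmNN code; shapes and data copied from the listing) reproduces the biasEnabled, non-transposed case:

    // Standalone check of the arithmetic behind result.outputExpected above:
    // output[n][o] = sum_i input[n][i] * weights[i][o] + bias[o].
    #include <cassert>
    #include <cmath>
    #include <vector>

    int main()
    {
        const unsigned int batch = 2, inputSize = 5, outputSize = 3;
        const std::vector<float> input   = { 1.f, 2.f, 3.f, 4.f, 5.f,
                                             5.f, 4.f, 3.f, 2.f, 1.f };
        const std::vector<float> weights = { .5f, 2.f, .5f,   // inputSize x outputSize, row-major
                                             .5f, 2.f, 1.f,
                                             .5f, 2.f, 2.f,
                                             .5f, 2.f, 3.f,
                                             .5f, 2.f, 4.f };
        const std::vector<float> bias    = { 10.f, 20.f, 30.f }; // biasEnabled == true

        std::vector<float> output(batch * outputSize, 0.f);
        for (unsigned int n = 0; n < batch; ++n)
        {
            for (unsigned int o = 0; o < outputSize; ++o)
            {
                float acc = bias[o];
                for (unsigned int i = 0; i < inputSize; ++i)
                {
                    acc += input[n * inputSize + i] * weights[i * outputSize + o];
                }
                output[n * outputSize + o] = acc;
            }
        }

        // Matches the sums written out in the listing: {17.5, 50, 70.5, 17.5, 50, 52.5}.
        assert(std::fabs(output[0] - 17.5f) < 1e-5f);
        assert(std::fabs(output[2] - 70.5f) < 1e-5f);
        assert(std::fabs(output[5] - 52.5f) < 1e-5f);
        return 0;
    }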

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  transposeWeights 
)

Definition at line 433 of file FullyConnectedTestImpl.cpp.

{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
                                                                   tensorHandleFactory,
                                                                   transposeWeights);
}
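This wrapper only pins the template parameter; the shared body lives in FullyConnectedLargeTestCommon, so the same test can be instantiated for other data types. A schematic of the pattern (hypothetical names, illustration only):

    // Schematic of the dispatch pattern above: one typed common body,
    // plus a thin un-templated wrapper per data type (names hypothetical).
    enum class DataType { Float32, QAsymmU8 };

    template <DataType DT>
    int LargeTestCommon(bool transposeWeights)
    {
        // The typed test body would live here.
        return transposeWeights ? 1 : 0;
    }

    // Un-templated wrapper, mirroring FullyConnectedLargeTest above.
    int LargeTestFloat32(bool transposeWeights)
    {
        return LargeTestCommon<DataType::Float32>(transposeWeights);
    }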

◆ FullyConnectedTest()

LayerTestResult<T, 2> FullyConnectedTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  biasEnabled,
bool  constantWeights 
)

Definition at line 128 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

Referenced by armnn::GetVector().

{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    if (constantWeights)
    {
        result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
                                                 memoryManager,
                                                 tensorHandleFactory,
                                                 inputTensorInfo,
                                                 outputTensorInfo,
                                                 weightsDesc,
                                                 biasesDesc,
                                                 weights,
                                                 bias,
                                                 input,
                                                 biasEnabled,
                                                 true);
    }
    else
    {
        result = SimpleFullyConnectedTestWeightsAsInputsImpl<T>(workloadFactory,
                                                                memoryManager,
                                                                tensorHandleFactory,
                                                                inputTensorInfo,
                                                                outputTensorInfo,
                                                                weightsDesc,
                                                                biasesDesc,
                                                                weights,
                                                                bias,
                                                                input,
                                                                biasEnabled,
                                                                true);
    }

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}
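The quantization parameters above follow the usual affine scheme, real = scale × (quantized − offset), and the bias tensor's scale is set to inputScale × weightScale with a zero offset. A standalone sketch (not ArmNN code) decoding the raw int32 bias values used by the test:

    // Standalone illustration of the affine quantization used by the tensor infos above.
    #include <cstdint>
    #include <cstdio>

    float Dequantize(int32_t q, float scale, int32_t offset)
    {
        return scale * static_cast<float>(q - offset);
    }

    int main()
    {
        const float inputScale  = 0.1f;                     // inputTensorInfo above
        const float weightScale = 0.2f;                     // weightsDesc above
        const float biasScale   = inputScale * weightScale; // 0.02, as set on biasesDesc

        // The raw bias values {9250, 67500} decode to real biases of 185 and 1350.
        std::printf("bias[0] = %.1f\n", Dequantize(9250,  biasScale, 0));
        std::printf("bias[1] = %.1f\n", Dequantize(67500, biasScale, 0));
        return 0;
    }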