ArmNN 20.05
FullyConnectedTestImpl.hpp File Reference


Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool transposeWeights)
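Each of these helpers builds a fully connected (dense) workload through the supplied IWorkloadFactory, runs it, and returns a LayerTestResult holding both the computed output and the hand-calculated reference. A minimal sketch of driving one of them by hand against the reference backend follows; RefWorkloadFactory, CompareTensors and the include paths are assumptions taken from the ArmNN test sources, not something this header provides.

// Hedged sketch: exact include paths depend on the build tree.
#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
#include <reference/RefWorkloadFactory.hpp>

void RunFullyConnectedFloat32()
{
    armnn::RefWorkloadFactory workloadFactory;

    // The reference backend runs without an external memory manager, so a
    // null shared pointer is assumed to be acceptable here.
    armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager = nullptr;

    LayerTestResult<float, 2> result = FullyConnectedFloat32Test(
        workloadFactory, memoryManager,
        true,   // biasEnabled
        false); // transposeWeights

    // result.output holds the computed tensor and result.outputExpected the
    // reference values; the in-tree suites compare them (e.g. CompareTensors).
}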
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                    bool biasEnabled,
                                                    bool transposeWeights)

Definition at line 247 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::outputExpected, and armnn::swap().

{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}
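Each value of outputExpected above is the dot product of one input row with one weight column, plus that output channel's bias. A standalone restatement of the arithmetic for the first input row (the layout and names are illustrative, not part of the test code):

#include <array>
#include <cstddef>
#include <cstdio>

int main()
{
    // First batch of the test input and the non-transposed weights above.
    const std::array<float, 5> inputRow = { 1.f, 2.f, 3.f, 4.f, 5.f };
    const std::array<std::array<float, 3>, 5> weights = {{
        { .5f, 2.f, .5f },
        { .5f, 2.f, 1.f },
        { .5f, 2.f, 2.f },
        { .5f, 2.f, 3.f },
        { .5f, 2.f, 4.f },
    }};
    const std::array<float, 3> bias = { 10.f, 20.f, 30.f }; // biasEnabled values

    for (std::size_t out = 0; out < 3; ++out)
    {
        float acc = bias[out];
        for (std::size_t in = 0; in < 5; ++in)
        {
            acc += inputRow[in] * weights[in][out];
        }
        // Prints 17.5, 50.0 and 70.5: the sums (7.5, 30, 40.5) plus the
        // biases, matching the first row of outputExpected.
        std::printf("channel %zu: %.1f\n", out, acc);
    }
    return 0;
}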

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
                                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                  bool transposeWeights)

Definition at line 344 of file FullyConnectedTestImpl.cpp.

{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
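In the in-tree backend suites this helper is typically not called directly but registered as a test case. A sketch of such a registration, assuming the ARMNN_AUTO_TEST_CASE macro from the 20.05 backend test infrastructure (the case names here are illustrative):

// Hypothetical registration in a backend's layer-test file.
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposedWeights, FullyConnectedLargeTest, true)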

◆ FullyConnectedTest()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedTest(armnn::IWorkloadFactory& workloadFactory,
                                         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                         bool biasEnabled)

Definition at line 71 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
            ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}
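The quantization parameters above follow the usual affine scheme: the bias scale is set to inputScale * weightsScale (0.1f * 0.2f = 0.02f) with a zero offset, so the int32 bias values 9250 and 67500 represent 185.0 and 1350.0 in real terms. A standalone restatement of that arithmetic (illustrative names only):

#include <cstdint>
#include <cstdio>

int main()
{
    const float inputScale   = 0.1f;
    const float weightsScale = 0.2f;
    const float biasScale    = inputScale * weightsScale; // 0.02, as set on biasesDesc

    const std::int32_t quantizedBias[2] = { 9250, 67500 };
    for (int i = 0; i < 2; ++i)
    {
        // Dequantize: real = quantized * scale (the offset is 0).
        // Prints 185.0 and 1350.0.
        std::printf("bias[%d] = %.1f\n", i, quantizedBias[i] * biasScale);
    }
    return 0;
}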