ArmNN
 21.08
FullyConnectedTestImpl.hpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 278 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::m_ExpectedData, and armnn::swap().

Referenced by TEST_SUITE().

284 {
285  unsigned int inputWidth = 1;
286  unsigned int inputHeight = 1;
287  unsigned int inputChannels = 5;
288  unsigned int inputNum = 2;
289 
290  unsigned int outputChannels = 3;
291  unsigned int outputNum = 2;
292 
293  // Define the tensor descriptors.
294  armnn::TensorInfo inputTensorInfo;
295  armnn::TensorInfo outputTensorInfo;
296  armnn::TensorInfo weightsDesc;
297  armnn::TensorInfo biasesDesc;
298 
299  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
300  unsigned int outputShape[] = { outputNum, outputChannels };
301  unsigned int weightsShape[] = { inputChannels, outputChannels };
302 
303  if (transposeWeights)
304  {
305  std::swap(weightsShape[0], weightsShape[1]);
306  }
307 
308  unsigned int biasShape[] = { outputChannels };
309 
310  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
311  outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
312  weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
313  biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
314 
315  LayerTestResult<float, 2> result(outputTensorInfo);
316 
317  std::vector<float> input =
318  {
319  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
320  5.0f, 4.0f, 3.0f, 2.0f, 1.0f
321  };
322 
323  std::vector<float> weights =
324  {
325  .5f, 2.f, .5f,
326  .5f, 2.f, 1.f,
327  .5f, 2.f, 2.f,
328  .5f, 2.f, 3.f,
329  .5f, 2.f, 4.f
330  };
331 
332  if (transposeWeights)
333  {
334  weights =
335  {
336  .5f, .5f, .5f, .5f, .5f,
337  2.f, 2.f, 2.f, 2.f, 2.f,
338  .5f, 1.f, 2.f, 3.f, 4.f
339  };
340  }
341 
342  std::vector<float> biasValues({0.f, 0.f, 0.f});
343  if (biasEnabled)
344  {
345  biasValues = std::vector<float>({10.f, 20.f, 30.f});
346  }
347 
348  result = SimpleFullyConnectedTestImpl<float>(
349  workloadFactory,
350  memoryManager,
351  tensorHandleFactory,
352  inputTensorInfo, outputTensorInfo,
353  weightsDesc, biasesDesc,
354  weights, biasValues, input,
355  biasEnabled, transposeWeights, true
356  );
357 
358  std::vector<float> expectedOutput =
359  {
360  0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
361  2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
362  0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
363 
364  2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
365  10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
366  2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
367  };
368  result.m_ExpectedData = expectedOutput;
369 
370  return result;
371 }
void swap(OriginsDescriptor &first, OriginsDescriptor &second)

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  transposeWeights 
)

Definition at line 373 of file FullyConnectedTestImpl.cpp.

Referenced by TEST_SUITE().

378 {
379  return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
380  memoryManager,
381  tensorHandleFactory,
382  transposeWeights);
383 }

◆ FullyConnectedTest()

LayerTestResult<T, 2> FullyConnectedTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
bool  biasEnabled,
bool  constantWeights 
)

Definition at line 95 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::m_ExpectedData, and TensorInfo::SetQuantizationScale().

101 {
102  constexpr static unsigned int inputWidth = 3u;
103  constexpr static unsigned int inputHeight = 2u;
104  constexpr static unsigned int inputChannels = 1u;
105 
106  constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
107 
108  constexpr static unsigned int outputChannels = 2u;
109 
110  armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
111  inputTensorInfo.SetQuantizationScale(0.1f);
112  inputTensorInfo.SetQuantizationOffset(63);
113 
114  armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
115  outputTensorInfo.SetQuantizationScale(5.f);
116  outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
117 
118  armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
119  weightsDesc.SetQuantizationScale(0.2f);
120  weightsDesc.SetQuantizationOffset(93);
121 
122  armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
123  biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
124  biasesDesc.SetQuantizationOffset(0);
125 
126  LayerTestResult<T, 2> result(outputTensorInfo);
127 
128  std::vector<T> input = ConvertToDataType<ArmnnType>(
129  {
130  -1.2f, 6.1f, -3.5f,
131  18.8f, -5.5f, 2.9f
132  },
133  inputTensorInfo);
134 
135  std::vector<T> weights = ConvertToDataType<ArmnnType>(
136  {
137  -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
138  23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
139  },
140  weightsDesc);
141 
142  std::vector<int32_t> bias = {9250, 67500};
143 
144  result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
145  memoryManager,
146  tensorHandleFactory,
147  inputTensorInfo,
148  outputTensorInfo,
149  weightsDesc,
150  biasesDesc,
151  weights,
152  bias,
153  input,
154  biasEnabled,
155  true,
156  constantWeights);
157 
158  if (biasEnabled)
159  {
160  result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
161  }
162  else
163  {
164  result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
165  }
166 
167  return result;
168 }
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475