ArmNN 20.02
FullyConnectedTestImpl.cpp File Reference


Functions

template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsDesc, armnn::TensorInfo biasesDesc, boost::multi_array< T, 2 > &weights, boost::multi_array< B, 1 > &bias, boost::multi_array< T, 4 > &input, bool biasEnabled, bool transposeWeights)
 
template<armnn::DataType ArmnnType, typename T >
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool transposeWeights)
 

Function Documentation
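
The sections below document each helper in detail. For orientation, here is a minimal, illustrative sketch (not part of this file) of how a backend unit test might drive these helpers; the include path and the surrounding function are assumptions for the sketch.

#include "FullyConnectedTestImpl.hpp" // path assumed; depends on the test-tree layout

// Hypothetical caller exercising the fully connected test helpers against a backend.
void RunFullyConnectedChecks(armnn::IWorkloadFactory& factory,
                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memMgr)
{
    // Quantized 8-bit test with bias enabled.
    auto qAsymmU8Result = FullyConnectedTest<armnn::DataType::QAsymmU8>(factory, memMgr, true);

    // Float32 test, bias enabled, weights stored transposed.
    auto floatResult = FullyConnectedFloat32Test(factory, memMgr, true, true);

    // Large-value Float32 test without transposed weights.
    auto largeResult = FullyConnectedLargeTest(factory, memMgr, false);

    // Each LayerTestResult carries 'output' and 'outputExpected' for the caller to compare.
}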

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool biasEnabled,
bool transposeWeights
)

Definition at line 247 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::outputExpected, and armnn::swap().

252 {
253  unsigned int inputWidth = 1;
254  unsigned int inputHeight = 1;
255  unsigned int inputChannels = 5;
256  unsigned int inputNum = 2;
257 
258  unsigned int outputChannels = 3;
259  unsigned int outputNum = 2;
260 
261  // Define the tensor descriptors.
262  armnn::TensorInfo inputTensorInfo;
263  armnn::TensorInfo outputTensorInfo;
264  armnn::TensorInfo weightsDesc;
265  armnn::TensorInfo biasesDesc;
266 
267  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
268  unsigned int outputShape[] = { outputNum, outputChannels };
269  unsigned int weightsShape[] = { inputChannels, outputChannels };
270 
271  if (transposeWeights)
272  {
273  std::swap(weightsShape[0], weightsShape[1]);
274  }
275 
276  unsigned int biasShape[] = { outputChannels };
277 
278  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
279  outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
280  weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
281  biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
282 
283  LayerTestResult<float, 2> result(outputTensorInfo);
284 
285  boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
286  {
287  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
288 
289  5.0f, 4.0f, 3.0f, 2.0f, 1.0f
290  })
291  );
292 
293  boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
294  {
295  .5f, 2.f, .5f,
296  .5f, 2.f, 1.f,
297  .5f, 2.f, 2.f,
298  .5f, 2.f, 3.f,
299  .5f, 2.f, 4.f
300  }));
301 
302  if (transposeWeights)
303  {
304  weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
305  {
306  .5f, .5f, .5f, .5f, .5f,
307  2.f, 2.f, 2.f, 2.f, 2.f,
308  .5f, 1.f, 2.f, 3.f, 4.f
309  }));
310  }
311 
312 
313  std::vector<float> biasValues({0.f, 0.f, 0.f});
314  if (biasEnabled)
315  {
316  biasValues = std::vector<float>({10.f, 20.f, 30.f});
317  }
318  boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
319 
320  result = SimpleFullyConnectedTestImpl<float>(
321  workloadFactory,
322  memoryManager,
323  inputTensorInfo, outputTensorInfo,
324  weightsDesc, biasesDesc,
325  weights, bias, input,
326  biasEnabled, transposeWeights
327  );
328 
329  result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
330  {
331  0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
332  2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
333  0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
334 
335  2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
336  10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
337  2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
338  })
339  );
340 
341  return result;
342 }
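
As a cross-check (illustration only, not part of the file), the first expected element in the bias-enabled case follows directly from the dot product of the first input row with the first weight column, plus the first bias value:

// Sketch only: hand-computed first expected output element of FullyConnectedFloat32Test.
//   input row 0      = { 1, 2, 3, 4, 5 }
//   weights column 0 = { 0.5, 0.5, 0.5, 0.5, 0.5 }
//   bias[0] (enabled) = 10
constexpr float expected00 =
    1.0f * 0.5f + 2.0f * 0.5f + 3.0f * 0.5f + 4.0f * 0.5f + 5.0f * 0.5f + 10.0f; // 17.5f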

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool transposeWeights
)

Definition at line 344 of file FullyConnectedTestImpl.cpp.

348 {
349  return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
350 }

◆ FullyConnectedLargeTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool transposeWeights,
float qScale = 0.0f,
int32_t qOffset = 0
)

Definition at line 148 of file FullyConnectedTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::swap().

154 {
155  unsigned int inputWidth = 1;
156  unsigned int inputHeight = 1;
157  unsigned int inputChannels = 5;
158  unsigned int inputNum = 1;
159 
160  unsigned int outputChannels = 1;
161  unsigned int outputNum = 1;
162 
163  // Define the tensor descriptors.
164  armnn::TensorInfo inputTensorInfo;
165  armnn::TensorInfo outputTensorInfo;
166  armnn::TensorInfo weightsDesc;
167  armnn::TensorInfo biasesDesc;
168 
169  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
170  unsigned int outputShape[] = { outputNum, outputChannels };
171  unsigned int weightsShape[] = { inputChannels, outputChannels };
172  if (transposeWeights)
173  {
174  std::swap(weightsShape[0], weightsShape[1]);
175  }
176 
177  unsigned int biasShape[] = { outputChannels };
178 
179  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
180  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
181  weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
182  biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
183 
184  // Set quantization parameters if the requested type is a quantized type.
185  if(armnn::IsQuantizedType<T>())
186  {
187  inputTensorInfo.SetQuantizationScale(qScale);
188  inputTensorInfo.SetQuantizationOffset(qOffset);
189  outputTensorInfo.SetQuantizationScale(qScale);
190  outputTensorInfo.SetQuantizationOffset(qOffset);
191  }
192 
193  LayerTestResult<T, 2> result(outputTensorInfo);
194 
195  boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
196  armnnUtils::QuantizedVector<T>({
197  1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
198  },
199  qScale, qOffset)
200  );
201 
202  boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
203  armnnUtils::QuantizedVector<T>({
204  2.0f, 3.0f, 4.0f, 5.0f, 6.0f
205  },
206  qScale, qOffset)
207  );
208 
209  std::vector<T> biasValues({900000.f});
210  boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
211 
212  result = SimpleFullyConnectedTestImpl<T>(
213  workloadFactory,
214  memoryManager,
215  inputTensorInfo, outputTensorInfo,
216  weightsDesc, biasesDesc,
217  weights, bias, input,
218  true, transposeWeights
219  );
220 
221  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
222  armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));
223 
224  return result;
225 }
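
As a quick sanity check on the expected value (illustration only, not part of the file): with bias enabled, the single output element is the dot product of the input with the weights plus the bias.

// Sketch only: hand-computed expected output of FullyConnectedLargeTestCommon.
//   input   = { 1, 10, 100, 1000, 10000 }
//   weights = { 2, 3, 4, 5, 6 }
//   bias    = 900000
constexpr float expectedLarge =
    1.0f * 2.0f + 10.0f * 3.0f + 100.0f * 4.0f + 1000.0f * 5.0f + 10000.0f * 6.0f
    + 900000.0f; // 2 + 30 + 400 + 5000 + 60000 + 900000 = 965432.0f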

◆ FullyConnectedTest()

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
bool biasEnabled
)

Definition at line 71 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

75 {
76  constexpr static unsigned int inputWidth = 3u;
77  constexpr static unsigned int inputHeight = 2u;
78  constexpr static unsigned int inputChannels = 1u;
79 
80  constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
81 
82  constexpr static unsigned int outputChannels = 2u;
83 
84  armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
85  inputTensorInfo.SetQuantizationScale(0.1f);
86  inputTensorInfo.SetQuantizationOffset(63);
87 
88  armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
89  outputTensorInfo.SetQuantizationScale(5.f);
90  outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
91 
92  armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
93  weightsDesc.SetQuantizationScale(0.2f);
94  weightsDesc.SetQuantizationOffset(93);
95 
96  armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
97  biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
98  biasesDesc.SetQuantizationOffset(0);
99 
100  LayerTestResult<T, 2> result(outputTensorInfo);
101 
102  auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
103  {
104  -1.2f, 6.1f, -3.5f,
105  18.8f, -5.5f, 2.9f
106  },
107  inputTensorInfo));
108 
109  auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
110  {
111  -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
112  23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
113  },
114  weightsDesc));
115 
116  auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
117 
118  result = SimpleFullyConnectedTestImpl<T>(
119  workloadFactory,
120  memoryManager,
121  inputTensorInfo, outputTensorInfo,
122  weightsDesc, biasesDesc,
123  weights, bias, input,
124  biasEnabled, true
125  );
126 
127  if (biasEnabled)
128  {
129  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
130  ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
131  }
132  else
133  {
134  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
135  ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
136  }
137 
138  return result;
139 }
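
For context (a sketch, not taken from the file): the bias tensor follows the quantized-bias convention set up in the code above, biasScale = inputScale * weightScale, so the raw int32 bias values decode to real values as follows.

// Sketch only: decoding the int32 bias values used by FullyConnectedTest.
constexpr float biasScale = 0.1f * 0.2f;           // inputScale * weightScale = 0.02f
constexpr float bias0Real = 9250.0f  * biasScale;  // 185.0f
constexpr float bias1Real = 67500.0f * biasScale;  // 1350.0f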

◆ FullyConnectedTest< armnn::DataType::QAsymmU8 >()

template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > ( armnn::IWorkloadFactory & workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, bool biasEnabled )

◆ FullyConnectedTest< armnn::DataType::QSymmS16 >()

template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > ( armnn::IWorkloadFactory & workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, bool biasEnabled )

◆ SimpleFullyConnectedTestImpl()

template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::TensorInfo inputTensorInfo,
armnn::TensorInfo outputTensorInfo,
armnn::TensorInfo weightsDesc,
armnn::TensorInfo biasesDesc,
boost::multi_array< T, 2 > & weights,
boost::multi_array< B, 1 > & bias,
boost::multi_array< T, 4 > & input,
bool biasEnabled,
bool transposeWeights
)

Definition at line 24 of file FullyConnectedTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFullyConnected(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), FullyConnectedQueueDescriptor::m_Bias, FullyConnectedDescriptor::m_BiasEnabled, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, FullyConnectedDescriptor::m_TransposeWeightMatrix, FullyConnectedQueueDescriptor::m_Weight, and LayerTestResult< T, n >::output.

36 {
37  IgnoreUnused(memoryManager);
38  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
39  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
40 
41  armnn::FullyConnectedQueueDescriptor data;
42  armnn::WorkloadInfo info;
43  armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
44  armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
45 
46  AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
47  AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
48 
49  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
50  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
51  data.m_Weight = &weightsTensor;
52  data.m_Bias = &biasTensor;
53  data.m_Parameters.m_BiasEnabled = biasEnabled;
54  data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
55 
56  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
57  LayerTestResult<T, 2> result(outputTensorInfo);
58 
59  inputHandle->Allocate();
60  outputHandle->Allocate();
61  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
62 
63  ExecuteWorkload(*workload, memoryManager);
64 
65  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
66 
67  return result;
68 }
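
Not shown in this file: the calling test framework typically compares result.output against result.outputExpected element-wise. A minimal sketch of such a check, assuming the LayerTestResult members are boost::multi_array instances as in the signatures above (the helper name and tolerance are illustrative, not ArmNN API):

#include <boost/multi_array.hpp>
#include <cmath>
#include <cstddef>

// Sketch only: the kind of element-wise comparison a caller might perform on the
// 'output' and 'outputExpected' members of a LayerTestResult<float, 2>.
bool OutputsMatch(const boost::multi_array<float, 2>& output,
                  const boost::multi_array<float, 2>& expected,
                  float tolerance = 1e-5f)
{
    if (output.num_elements() != expected.num_elements())
    {
        return false;
    }
    for (std::size_t i = 0; i < output.num_elements(); ++i)
    {
        if (std::fabs(output.data()[i] - expected.data()[i]) > tolerance)
        {
            return false;
        }
    }
    return true;
}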