ArmNN 20.08
FullyConnectedTestImpl.cpp File Reference

Go to the source code of this file.

Functions

template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsDesc, armnn::TensorInfo biasesDesc, boost::multi_array< T, 2 > &weights, boost::multi_array< B, 1 > &bias, boost::multi_array< T, 4 > &input, bool biasEnabled, bool transposeWeights)
 
template<armnn::DataType ArmnnType, typename T >
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool transposeWeights)
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    bool transposeWeights)

Definition at line 249 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::outputExpected, and armnn::swap().

{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,

            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
        })
    );

    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
        {
            .5f, 2.f, .5f,
            .5f, 2.f, 1.f,
            .5f, 2.f, 2.f,
            .5f, 2.f, 3.f,
            .5f, 2.f, 4.f
        }));

    if (transposeWeights)
    {
        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
            {
                .5f, .5f, .5f, .5f, .5f,
                2.f, 2.f, 2.f, 2.f, 2.f,
                .5f, 1.f, 2.f, 3.f, 4.f
            }));
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }
    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, transposeWeights
    );

    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
        {
            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
        })
    );

    return result;
}
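
Each expected value above is the dot product of an input row with a weight column, plus the bias for that output channel. A minimal standalone sketch verifying the third output channel of the first batch element (illustrative only, not part of this file):

#include <cassert>

int main()
{
    // Batch element 0 of the input and the third weight column from above.
    const float input0[5] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f };
    const float col2[5]   = { 0.5f, 1.0f, 2.0f, 3.0f, 4.0f };
    float acc = 0.0f;
    for (int k = 0; k < 5; ++k)
    {
        acc += input0[k] * col2[k];
    }
    // 0.5 + 2.0 + 6.0 + 12.0 + 20.0 = 40.5, matching the third term of outputExpected.
    assert(acc == 40.5f);
    return 0;
}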

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool transposeWeights)

Definition at line 346 of file FullyConnectedTestImpl.cpp.

{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
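
Backend test suites typically run this helper through the auto-test-case machinery rather than calling it directly. A sketch of how a backend's layer-test file might register it (the registration names here are illustrative; consult the backend's test file for the actual ones):

// Runs FullyConnectedLargeTest against the backend under test,
// once with untransposed and once with transposed weights.
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposedWeights, FullyConnectedLargeTest, true)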

◆ FullyConnectedLargeTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)

Definition at line 150 of file FullyConnectedTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::swap().

{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
        armnnUtils::QuantizedVector<T>({
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset)
    );

    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
        armnnUtils::QuantizedVector<T>({
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset)
    );

    std::vector<T> biasValues({900000.f});
    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        true, transposeWeights
    );

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
        armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));

    return result;
}
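
The single expected value is easy to verify by hand: it is the dot product of the input with the weights, plus the bias:

    1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000
    = 2 + 30 + 400 + 5000 + 60000 + 900000
    = 965432

The widely spread magnitudes presumably exercise accumulation precision, which is why this variant is also instantiated for the quantized types.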

◆ FullyConnectedTest()

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled)

Definition at line 73 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo));

    auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc));

    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, bias, input,
        biasEnabled, true
    );

    if (biasEnabled)
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
    }
    else
    {
        result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
                                                 ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
    }

    return result;
}
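
Note the bias quantization convention used above: the bias scale is the product of the input and weights scales, with a zero offset. Working it through (plain arithmetic, for illustration):

    biasScale = inputScale * weightsScale = 0.1 * 0.2 = 0.02
    real bias[0] =  9250 * 0.02 =  185.0
    real bias[1] = 67500 * 0.02 = 1350.0

This is the standard convention that lets the int32 bias be added directly to the int32 accumulator of the quantized matrix multiply.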

◆ FullyConnectedTest< armnn::DataType::QAsymmU8 >()

Explicit instantiation of FullyConnectedTest for QAsymmU8; see the declaration in the function list above.

◆ FullyConnectedTest< armnn::DataType::QSymmS16 >()

Explicit instantiation of FullyConnectedTest for QSymmS16; see the declaration in the function list above.

◆ SimpleFullyConnectedTestImpl()

template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsDesc,
    armnn::TensorInfo biasesDesc,
    boost::multi_array<T, 2> &weights,
    boost::multi_array<B, 1> &bias,
    boost::multi_array<T, 4> &input,
    bool biasEnabled,
    bool transposeWeights)

Definition at line 24 of file FullyConnectedTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFullyConnected(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), and LayerTestResult< T, n >::output.

{
    IgnoreUnused(memoryManager);
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);

    LayerTestResult<T, 2> result(outputTensorInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
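
All of the tests above funnel into this helper. A minimal sketch of exercising it end to end through one of the wrappers, assuming the reference backend (the include paths are illustrative, and the null memory manager is acceptable here since the helper ignores it):

#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
#include <reference/RefWorkloadFactory.hpp>

void RunFullyConnectedSmokeTest()
{
    armnn::RefWorkloadFactory workloadFactory;
    LayerTestResult<float, 2> result =
        FullyConnectedFloat32Test(workloadFactory, nullptr,
                                  /*biasEnabled=*/true,
                                  /*transposeWeights=*/false);
    // result.output holds the computed tensor; result.outputExpected the reference values.
}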