From f86be93b7492b381370cae7bf71eca8572a0cbae Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Tue, 24 Aug 2021 16:27:15 +0100
Subject: IVGCVSW-5924 Update 21.08 Doxygen Documents

* Also updated latest symlink.

Signed-off-by: Matthew Sloyan
Change-Id: If9b4e0e52464abdf797b9eb858ae19bcc64c2aea
---
 21.08/_fully_connected_test_impl_8cpp.xhtml | 562 ++++++++++++++++++++++++++++
 1 file changed, 562 insertions(+)
 create mode 100644 21.08/_fully_connected_test_impl_8cpp.xhtml

(limited to '21.08/_fully_connected_test_impl_8cpp.xhtml')

diff --git a/21.08/_fully_connected_test_impl_8cpp.xhtml b/21.08/_fully_connected_test_impl_8cpp.xhtml
new file mode 100644
index 0000000000..112befb0e5
--- /dev/null
+++ b/21.08/_fully_connected_test_impl_8cpp.xhtml
@@ -0,0 +1,562 @@

ArmNN: src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp File Reference
ArmNN 21.08
FullyConnectedTestImpl.cpp File Reference
Go to the source code of this file.

Functions
template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo, std::vector< T > &weights, std::vector< B > &bias, std::vector< T > &input, bool biasEnabled, bool transposeWeights, bool constantWeights)
 
template<armnn::DataType ArmnnType, typename T >
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test (armnn::IWorkloadFactory & workloadFactory,
                                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                     const armnn::ITensorHandleFactory & tensorHandleFactory,
                                                     bool biasEnabled,
                                                     bool transposeWeights)

Definition at line 278 of file FullyConnectedTestImpl.cpp.


References armnn::Float32, LayerTestResult< T, n >::m_ExpectedData, and armnn::swap().


Referenced by TEST_SUITE().

284 {
285  unsigned int inputWidth = 1;
286  unsigned int inputHeight = 1;
287  unsigned int inputChannels = 5;
288  unsigned int inputNum = 2;
289 
290  unsigned int outputChannels = 3;
291  unsigned int outputNum = 2;
292 
293  // Define the tensor descriptors.
294  armnn::TensorInfo inputTensorInfo;
295  armnn::TensorInfo outputTensorInfo;
296  armnn::TensorInfo weightsDesc;
297  armnn::TensorInfo biasesDesc;
298 
299  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
300  unsigned int outputShape[] = { outputNum, outputChannels };
301  unsigned int weightsShape[] = { inputChannels, outputChannels };
302 
303  if (transposeWeights)
304  {
305  std::swap(weightsShape[0], weightsShape[1]);
306  }
307 
308  unsigned int biasShape[] = { outputChannels };
309 
310  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
311  outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
312  weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
313  biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
314 
315  LayerTestResult<float, 2> result(outputTensorInfo);
316 
317  std::vector<float> input =
318  {
319  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
320  5.0f, 4.0f, 3.0f, 2.0f, 1.0f
321  };
322 
323  std::vector<float> weights =
324  {
325  .5f, 2.f, .5f,
326  .5f, 2.f, 1.f,
327  .5f, 2.f, 2.f,
328  .5f, 2.f, 3.f,
329  .5f, 2.f, 4.f
330  };
331 
332  if (transposeWeights)
333  {
334  weights =
335  {
336  .5f, .5f, .5f, .5f, .5f,
337  2.f, 2.f, 2.f, 2.f, 2.f,
338  .5f, 1.f, 2.f, 3.f, 4.f
339  };
340  }
341 
342  std::vector<float> biasValues({0.f, 0.f, 0.f});
343  if (biasEnabled)
344  {
345  biasValues = std::vector<float>({10.f, 20.f, 30.f});
346  }
347 
348  result = SimpleFullyConnectedTestImpl<float>(
349  workloadFactory,
350  memoryManager,
351  tensorHandleFactory,
352  inputTensorInfo, outputTensorInfo,
353  weightsDesc, biasesDesc,
354  weights, biasValues, input,
355  biasEnabled, transposeWeights, true
356  );
357 
358  std::vector<float> expectedOutput =
359  {
360  0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
361  2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
362  0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
363 
364  2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
365  10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
366  2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
367  };
368  result.m_ExpectedData = expectedOutput;
369 
370  return result;
371 }
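
The expected values above are just the dot product of each input row with each weight column, plus the bias. A minimal standalone sketch (plain C++, independent of ArmNN) that recomputes them for the biasEnabled case:

// Standalone check of the expected outputs of FullyConnectedFloat32Test:
// output[n][o] = sum_i input[n][i] * weights[i][o] + bias[o], using the
// non-transposed [inputChannels, outputChannels] weight layout shown above.
#include <cstdio>
#include <vector>

int main()
{
    const unsigned int inputChannels = 5, outputChannels = 3, batch = 2;
    const std::vector<float> input   = { 1.f, 2.f, 3.f, 4.f, 5.f,
                                         5.f, 4.f, 3.f, 2.f, 1.f };
    const std::vector<float> weights = { .5f, 2.f, .5f,
                                         .5f, 2.f, 1.f,
                                         .5f, 2.f, 2.f,
                                         .5f, 2.f, 3.f,
                                         .5f, 2.f, 4.f };
    const std::vector<float> bias    = { 10.f, 20.f, 30.f };  // biasEnabled == true

    for (unsigned int n = 0; n < batch; ++n)
    {
        for (unsigned int o = 0; o < outputChannels; ++o)
        {
            float acc = bias[o];
            for (unsigned int i = 0; i < inputChannels; ++i)
            {
                acc += input[n * inputChannels + i] * weights[i * outputChannels + o];
            }
            std::printf("output[%u][%u] = %g\n", n, o, acc);
        }
    }
    return 0;
}

Run, this prints the same six values the test builds symbolically: 17.5, 50, 70.5, 17.5, 50, 52.5.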

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest (armnn::IWorkloadFactory & workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                   const armnn::ITensorHandleFactory & tensorHandleFactory,
                                                   bool transposeWeights)

Definition at line 373 of file FullyConnectedTestImpl.cpp.


Referenced by TEST_SUITE().

378 {
379  return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
380  memoryManager,
381  tensorHandleFactory,
382  transposeWeights);
383 }

◆ FullyConnectedLargeTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon (armnn::IWorkloadFactory & workloadFactory,
                                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                     const armnn::ITensorHandleFactory & tensorHandleFactory,
                                                     bool transposeWeights,
                                                     float qScale = 0.0f,
                                                     int32_t qOffset = 0)

Definition at line 177 of file FullyConnectedTestImpl.cpp.


References LayerTestResult< T, n >::m_ExpectedData, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::swap().

184 {
185  unsigned int inputWidth = 1;
186  unsigned int inputHeight = 1;
187  unsigned int inputChannels = 5;
188  unsigned int inputNum = 1;
189 
190  unsigned int outputChannels = 1;
191  unsigned int outputNum = 1;
192 
193  // Define the tensor descriptors.
194  armnn::TensorInfo inputTensorInfo;
195  armnn::TensorInfo outputTensorInfo;
196  armnn::TensorInfo weightsDesc;
197  armnn::TensorInfo biasesDesc;
198 
199  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
200  unsigned int outputShape[] = { outputNum, outputChannels };
201  unsigned int weightsShape[] = { inputChannels, outputChannels };
202  if (transposeWeights)
203  {
204  std::swap(weightsShape[0], weightsShape[1]);
205  }
206 
207  unsigned int biasShape[] = { outputChannels };
208 
209  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
210  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
211  weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
212  biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
213 
214  // Set quantization parameters if the requested type is a quantized type.
215  if(armnn::IsQuantizedType<T>())
216  {
217  inputTensorInfo.SetQuantizationScale(qScale);
218  inputTensorInfo.SetQuantizationOffset(qOffset);
219  outputTensorInfo.SetQuantizationScale(qScale);
220  outputTensorInfo.SetQuantizationOffset(qOffset);
221  }
222 
223  LayerTestResult<T, 2> result(outputTensorInfo);
224 
225  std::vector<T> input = armnnUtils::QuantizedVector<T>(
226  {
227  1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
228  },
229  qScale, qOffset);
230 
231  std::vector<T> weights = armnnUtils::QuantizedVector<T>(
232  {
233  2.0f, 3.0f, 4.0f, 5.0f, 6.0f
234  },
235  qScale, qOffset);
236 
237  std::vector<T> biasValues({900000.f});
238 
239  result = SimpleFullyConnectedTestImpl<T>(
240  workloadFactory,
241  memoryManager,
242  tensorHandleFactory,
243  inputTensorInfo, outputTensorInfo,
244  weightsDesc, biasesDesc,
245  weights, biasValues, input,
246  true, transposeWeights, true
247  );
248 
249  result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);
250 
251  return result;
252 }
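
The single expected value encodes 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432. A standalone sketch (not ArmNN code) that checks this arithmetic and illustrates the affine scheme behind the qScale/qOffset parameters, real = qScale * (quantized - qOffset); the qScale/qOffset values in main are hypothetical:

// Recompute the expected value of FullyConnectedLargeTestCommon and show a
// quantize/dequantize round trip under the affine scheme assumed above.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const float input[5]   = { 1.f, 10.f, 100.f, 1000.f, 10000.f };
    const float weights[5] = { 2.f, 3.f, 4.f, 5.f, 6.f };

    float acc = 900000.f;                      // the bias value used above
    for (int i = 0; i < 5; ++i)
    {
        acc += input[i] * weights[i];
    }
    std::printf("expected = %g\n", acc);       // prints 965432

    const float   qScale  = 1.0f;              // hypothetical parameters
    const int32_t qOffset = 0;
    const int32_t q = static_cast<int32_t>(std::round(acc / qScale)) + qOffset;
    std::printf("quantized = %d, dequantized = %g\n", q, qScale * (q - qOffset));
    return 0;
}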

◆ FullyConnectedTest()

template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest (armnn::IWorkloadFactory & workloadFactory,
                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                          const armnn::ITensorHandleFactory & tensorHandleFactory,
                                          bool biasEnabled,
                                          bool constantWeights)

Definition at line 95 of file FullyConnectedTestImpl.cpp.


References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::m_ExpectedData, and TensorInfo::SetQuantizationScale().

101 {
102  constexpr static unsigned int inputWidth = 3u;
103  constexpr static unsigned int inputHeight = 2u;
104  constexpr static unsigned int inputChannels = 1u;
105 
106  constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
107 
108  constexpr static unsigned int outputChannels = 2u;
109 
110  armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
111  inputTensorInfo.SetQuantizationScale(0.1f);
112  inputTensorInfo.SetQuantizationOffset(63);
113 
114  armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
115  outputTensorInfo.SetQuantizationScale(5.f);
116  outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
117 
118  armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
119  weightsDesc.SetQuantizationScale(0.2f);
120  weightsDesc.SetQuantizationOffset(93);
121 
122  armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
123  biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
124  biasesDesc.SetQuantizationOffset(0);
125 
126  LayerTestResult<T, 2> result(outputTensorInfo);
127 
128  std::vector<T> input = ConvertToDataType<ArmnnType>(
129  {
130  -1.2f, 6.1f, -3.5f,
131  18.8f, -5.5f, 2.9f
132  },
133  inputTensorInfo);
134 
135  std::vector<T> weights = ConvertToDataType<ArmnnType>(
136  {
137  -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
138  23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
139  },
140  weightsDesc);
141 
142  std::vector<int32_t> bias = {9250, 67500};
143 
144  result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
145  memoryManager,
146  tensorHandleFactory,
147  inputTensorInfo,
148  outputTensorInfo,
149  weightsDesc,
150  biasesDesc,
151  weights,
152  bias,
153  input,
154  biasEnabled,
155  true,
156  constantWeights);
157 
158  if (biasEnabled)
159  {
160  result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
161  }
162  else
163  {
164  result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
165  }
166 
167  return result;
168 }
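
Note that the bias tensor's quantization scale is fixed to inputScale * weightsScale, the usual convention that lets int32 biases accumulate directly with the input*weight products. A small sketch (not ArmNN code) showing which real values the int32 biases {9250, 67500} represent under the scales set above:

// With biasScale = inputScale * weightsScale = 0.1 * 0.2 = 0.02 and a zero
// offset, an int32 bias b represents the real value b * biasScale.
#include <cstdio>

int main()
{
    const float inputScale   = 0.1f;           // inputTensorInfo scale above
    const float weightsScale = 0.2f;           // weightsDesc scale above
    const int   bias[2]      = { 9250, 67500 };

    for (int b : bias)
    {
        std::printf("%d -> %g\n", b, b * inputScale * weightsScale);  // 185 and 1350
    }
    return 0;
}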

◆ FullyConnectedTest< armnn::DataType::QAsymmU8 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2> FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory & workloadFactory,
                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                          const armnn::ITensorHandleFactory & tensorHandleFactory,
                                          bool biasEnabled,
                                          bool constWeights)

◆ FullyConnectedTest< armnn::DataType::QSymmS16 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2> FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory & workloadFactory,
                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                          const armnn::ITensorHandleFactory & tensorHandleFactory,
                                          bool biasEnabled,
                                          bool constWeights)

◆ SimpleFullyConnectedTestImpl()

template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory & workloadFactory,
                                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
                                                    const armnn::ITensorHandleFactory & tensorHandleFactory,
                                                    armnn::TensorInfo inputTensorInfo,
                                                    armnn::TensorInfo outputTensorInfo,
                                                    armnn::TensorInfo weightsTensorInfo,
                                                    armnn::TensorInfo biasesTensorInfo,
                                                    std::vector<T> & weights,
                                                    std::vector<B> & bias,
                                                    std::vector<T> & input,
                                                    bool biasEnabled,
                                                    bool transposeWeights,
                                                    bool constantWeights)

Definition at line 24 of file FullyConnectedTestImpl.cpp.


References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFullyConnected(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), and LayerTestResult< T, n >::m_ActualData.

38 {
39  std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
40  std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
41  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
42 
43  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
44 
45  armnn::FullyConnectedQueueDescriptor data;
46  armnn::WorkloadInfo info;
47  armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
48  armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);
49 
50  AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
51  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
52 
53  AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
54  AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
55  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
56 
57  // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
58  data.m_Weight = &weightsTensor;
59  data.m_Bias = &biasTensor;
60 
61  data.m_Parameters.m_BiasEnabled = biasEnabled;
62  data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
63  data.m_Parameters.m_ConstantWeights = constantWeights;
64 
65  std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
66  if (biasEnabled)
67  {
68  input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
69  AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
70  }
71 
72  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
73  LayerTestResult<T, 2> result(outputTensorInfo);
74 
75  input0Handle->Allocate();
76  input1Handle->Allocate();
77  outputHandle->Allocate();
78  CopyDataToITensorHandle(input0Handle.get(), input.data());
79  CopyDataToITensorHandle(input1Handle.get(), weights.data());
80  if (biasEnabled)
81  {
82  input2Handle->Allocate();
83  CopyDataToITensorHandle(input2Handle.get(), bias.data());
84  }
85 
86  ExecuteWorkload(*workload, memoryManager);
87 
88  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
89  result.m_ActualData = actualOutput;
90 
91  return result;
92 }
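
For reference, the computation this helper drives through the workload is output[n][o] = sum_i input[n][i] * W(i, o) + bias[o]. A standalone sketch (not ArmNN code) mirroring the weight layouts used by the tests above, where transposeWeights selects between the [inputSize, outputSize] and [outputSize, inputSize] buffer layouts:

// Plain reference implementation of a fully connected layer, usable to
// cross-check the workload outputs exercised by SimpleFullyConnectedTestImpl.
#include <vector>

std::vector<float> ReferenceFullyConnected(const std::vector<float>& input,   // [batch * inputSize]
                                           const std::vector<float>& weights,
                                           const std::vector<float>& bias,    // [outputSize], or empty
                                           unsigned int batch,
                                           unsigned int inputSize,
                                           unsigned int outputSize,
                                           bool transposeWeights)
{
    std::vector<float> output(batch * outputSize, 0.0f);
    for (unsigned int n = 0; n < batch; ++n)
    {
        for (unsigned int o = 0; o < outputSize; ++o)
        {
            float acc = bias.empty() ? 0.0f : bias[o];
            for (unsigned int i = 0; i < inputSize; ++i)
            {
                // Default layout is [inputSize, outputSize]; the transposed
                // buffer stores the same matrix as [outputSize, inputSize].
                const float w = transposeWeights ? weights[o * inputSize + i]
                                                 : weights[i * outputSize + o];
                acc += input[n * inputSize + i] * w;
            }
            output[n * outputSize + o] = acc;
        }
    }
    return output;
}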