ArmNN 21.05
FullyConnectedTestImpl.cpp File Reference


Functions

template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsDesc, armnn::TensorInfo biasesDesc, boost::multi_array< T, 2 > &weights, boost::multi_array< B, 1 > &bias, boost::multi_array< T, 4 > &input, bool biasEnabled, bool transposeWeights)
 
template<typename T , typename B >
LayerTestResult< T, 2 > SimpleFullyConnectedTestWeightsAsInputsImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::TensorInfo inputTensorInfo, armnn::TensorInfo outputTensorInfo, armnn::TensorInfo weightsTensorInfo, armnn::TensorInfo biasesTensorInfo, boost::multi_array< T, 2 > &weights, boost::multi_array< B, 1 > &bias, boost::multi_array< T, 4 > &input, bool biasEnabled, bool transposeWeights)
 
template<armnn::DataType ArmnnType, typename T >
LayerTestResult< T, 2 > FullyConnectedTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constantWeights)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 2 > FullyConnectedLargeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights, float qScale=0.0f, int32_t qOffset=0)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 2 > FullyConnectedTest< armnn::DataType::QAsymmU8 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 2 > FullyConnectedTest< armnn::DataType::QSymmS16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool constWeights)
 
LayerTestResult< float, 2 > FullyConnectedFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, bool transposeWeights)
 
LayerTestResult< float, 2 > FullyConnectedLargeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool transposeWeights)
 

Function Documentation

◆ FullyConnectedFloat32Test()

LayerTestResult<float, 2> FullyConnectedFloat32Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 334 of file FullyConnectedTestImpl.cpp.

References armnn::Float32, LayerTestResult< T, n >::outputExpected, and armnn::swap().

340 {
341  unsigned int inputWidth = 1;
342  unsigned int inputHeight = 1;
343  unsigned int inputChannels = 5;
344  unsigned int inputNum = 2;
345 
346  unsigned int outputChannels = 3;
347  unsigned int outputNum = 2;
348 
349  // Define the tensor descriptors.
350  armnn::TensorInfo inputTensorInfo;
351  armnn::TensorInfo outputTensorInfo;
352  armnn::TensorInfo weightsDesc;
353  armnn::TensorInfo biasesDesc;
354 
355  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
356  unsigned int outputShape[] = { outputNum, outputChannels };
357  unsigned int weightsShape[] = { inputChannels, outputChannels };
358 
359  if (transposeWeights)
360  {
361  std::swap(weightsShape[0], weightsShape[1]);
362  }
363 
364  unsigned int biasShape[] = { outputChannels };
365 
366  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
367  outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
368  weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
369  biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
370 
371  LayerTestResult<float, 2> result(outputTensorInfo);
372 
373  boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
374  {
375  1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
376 
377  5.0f, 4.0f, 3.0f, 2.0f, 1.0f
378  })
379  );
380 
381  boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
382  {
383  .5f, 2.f, .5f,
384  .5f, 2.f, 1.f,
385  .5f, 2.f, 2.f,
386  .5f, 2.f, 3.f,
387  .5f, 2.f, 4.f
388  }));
389 
390  if (transposeWeights)
391  {
392  weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
393  {
394  .5f, .5f, .5f, .5f, .5f,
395  2.f, 2.f, 2.f, 2.f, 2.f,
396  .5f, 1.f, 2.f, 3.f, 4.f
397  }));
398  }
399 
400 
401  std::vector<float> biasValues({0.f, 0.f, 0.f});
402  if (biasEnabled)
403  {
404  biasValues = std::vector<float>({10.f, 20.f, 30.f});
405  }
406  boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
407 
408  result = SimpleFullyConnectedTestImpl<float>(
409  workloadFactory,
410  memoryManager,
411  tensorHandleFactory,
412  inputTensorInfo, outputTensorInfo,
413  weightsDesc, biasesDesc,
414  weights, bias, input,
415  biasEnabled, transposeWeights
416  );
417 
418  result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
419  {
420  0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
421  2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
422  0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
423 
424  2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
425  10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
426  2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
427  })
428  );
429 
430  return result;
431 }
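For orientation, a minimal sketch of how a backend test might drive this helper. The wrapper below and its name are illustrative assumptions; only the FullyConnectedFloat32Test call itself comes from this file.

// Hypothetical driver (not part of this file): runs the biased,
// non-transposed Float32 case against whichever backend supplied the
// factories. Assumes the header declaring FullyConnectedFloat32Test is
// on the include path.
LayerTestResult<float, 2> RunFullyConnectedFloat32Biased(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // biasEnabled = true, transposeWeights = false
    return FullyConnectedFloat32Test(workloadFactory, memoryManager,
                                     tensorHandleFactory, true, false);
}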

◆ FullyConnectedLargeTest()

LayerTestResult<float, 2> FullyConnectedLargeTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  transposeWeights 
)

Definition at line 433 of file FullyConnectedTestImpl.cpp.

438 {
439  return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
440  memoryManager,
441  tensorHandleFactory,
442  transposeWeights);
443 }

◆ FullyConnectedLargeTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  transposeWeights,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 229 of file FullyConnectedTestImpl.cpp.

References LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::swap().

236 {
237  unsigned int inputWidth = 1;
238  unsigned int inputHeight = 1;
239  unsigned int inputChannels = 5;
240  unsigned int inputNum = 1;
241 
242  unsigned int outputChannels = 1;
243  unsigned int outputNum = 1;
244 
245  // Define the tensor descriptors.
246  armnn::TensorInfo inputTensorInfo;
247  armnn::TensorInfo outputTensorInfo;
248  armnn::TensorInfo weightsDesc;
249  armnn::TensorInfo biasesDesc;
250 
251  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
252  unsigned int outputShape[] = { outputNum, outputChannels };
253  unsigned int weightsShape[] = { inputChannels, outputChannels };
254  if (transposeWeights)
255  {
256  std::swap(weightsShape[0], weightsShape[1]);
257  }
258 
259  unsigned int biasShape[] = { outputChannels };
260 
261  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
262  outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
263  weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
264  biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
265 
266  // Set quantization parameters if the requested type is a quantized type.
267  if(armnn::IsQuantizedType<T>())
268  {
269  inputTensorInfo.SetQuantizationScale(qScale);
270  inputTensorInfo.SetQuantizationOffset(qOffset);
271  outputTensorInfo.SetQuantizationScale(qScale);
272  outputTensorInfo.SetQuantizationOffset(qOffset);
273  }
274 
275  LayerTestResult<T, 2> result(outputTensorInfo);
276 
277  boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
278  armnnUtils::QuantizedVector<T>({
279  1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
280  },
281  qScale, qOffset)
282  );
283 
284  boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
285  armnnUtils::QuantizedVector<T>({
286  2.0f, 3.0f, 4.0f, 5.0f, 6.0f
287  },
288  qScale, qOffset)
289  );
290 
291  std::vector<T> biasValues({900000.f});
292  boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
293 
294  result = SimpleFullyConnectedTestImpl<T>(
295  workloadFactory,
296  memoryManager,
297  tensorHandleFactory,
298  inputTensorInfo, outputTensorInfo,
299  weightsDesc, biasesDesc,
300  weights, bias, input,
301  true, transposeWeights
302  );
303 
304  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
305  armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));
306 
307  return result;
308 }
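As a sanity check on the expected value: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432, matching the outputExpected above. Because qScale and qOffset default to 0.0f and 0, which only suit Float32, a quantized instantiation must pass explicit values. The sketch below is illustrative; the wrapper name and the quantization values are assumptions, not code from this file.

// Illustrative only: instantiating the large test for QAsymmU8 with
// explicit (placeholder) quantization parameters.
LayerTestResult<uint8_t, 2> RunFullyConnectedLargeQAsymmU8(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory,
        false,    // transposeWeights
        1.0f,     // qScale (assumed value)
        0);       // qOffset (assumed value)
}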

◆ FullyConnectedTest()

template<armnn::DataType ArmnnType, typename T >
LayerTestResult<T, 2> FullyConnectedTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constantWeights 
)

Definition at line 128 of file FullyConnectedTestImpl.cpp.

References armnn::GetBiasTypeFromWeightsType(), LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

Referenced by armnn::GetVector().

134 {
135  constexpr static unsigned int inputWidth = 3u;
136  constexpr static unsigned int inputHeight = 2u;
137  constexpr static unsigned int inputChannels = 1u;
138 
139  constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
140 
141  constexpr static unsigned int outputChannels = 2u;
142 
143  armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
144  inputTensorInfo.SetQuantizationScale(0.1f);
145  inputTensorInfo.SetQuantizationOffset(63);
146 
147  armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
148  outputTensorInfo.SetQuantizationScale(5.f);
149  outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
150 
151  armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
152  weightsDesc.SetQuantizationScale(0.2f);
153  weightsDesc.SetQuantizationOffset(93);
154 
155  armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
156  biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
157  biasesDesc.SetQuantizationOffset(0);
158 
159  LayerTestResult<T, 2> result(outputTensorInfo);
160 
161  auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
162  {
163  -1.2f, 6.1f, -3.5f,
164  18.8f, -5.5f, 2.9f
165  },
166  inputTensorInfo));
167 
168  auto weights = MakeTensor<T, 2>(weightsDesc, ConvertToDataType<ArmnnType>(
169  {
170  -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
171  23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
172  },
173  weightsDesc));
174 
175  auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
176 
177  if (constantWeights)
178  {
179  result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
180  memoryManager,
181  tensorHandleFactory,
182  inputTensorInfo,
183  outputTensorInfo,
184  weightsDesc,
185  biasesDesc,
186  weights,
187  bias,
188  input,
189  biasEnabled,
190  true);
191  }
192  else
193  {
194  result = SimpleFullyConnectedTestWeightsAsInputsImpl<T>(workloadFactory,
195  memoryManager,
196  tensorHandleFactory,
197  inputTensorInfo,
198  outputTensorInfo,
199  weightsDesc,
200  biasesDesc,
201  weights,
202  bias,
203  input,
204  biasEnabled,
205  true);
206  }
207 
208  if (biasEnabled)
209  {
210  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
211  ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo));
212  }
213  else
214  {
215  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
216  ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo));
217  }
218 
219  return result;
220 }
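A note on the quantization arithmetic above: biasesDesc deliberately takes its scale from inputTensorInfo's scale times weightsDesc's scale, the usual convention for per-tensor affine quantization, so that the int32 bias values live in the same fixed-point domain as the input*weight accumulations. The following standalone check (not part of this file) works that through for the raw biases {9250, 67500}:

#include <cstdio>

int main()
{
    // With real = scale * (quantized - offset), the product of an input value
    // and a weight value carries a combined scale of inputScale * weightsScale.
    // The bias tensor must use that same scale to be added directly.
    const float inputScale   = 0.1f;
    const float weightsScale = 0.2f;
    const float biasScale    = inputScale * weightsScale;  // 0.02f

    // The raw int32 biases used by FullyConnectedTest therefore represent
    // 9250 * 0.02 = 185.0 and 67500 * 0.02 = 1350.0 in real-valued terms,
    // which is what pushes the expected outputs to {80.f, 1460.f}.
    std::printf("bias0 = %.1f, bias1 = %.1f\n", 9250 * biasScale, 67500 * biasScale);
    return 0;
}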

◆ FullyConnectedTest< armnn::DataType::QAsymmU8 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2> FullyConnectedTest< armnn::DataType::QAsymmU8 > ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constWeights 
)

◆ FullyConnectedTest< armnn::DataType::QSymmS16 >()

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2> FullyConnectedTest< armnn::DataType::QSymmS16 > ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
bool  biasEnabled,
bool  constWeights 
)

◆ SimpleFullyConnectedTestImpl()

template<typename T , typename B >
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
armnn::TensorInfo  inputTensorInfo,
armnn::TensorInfo  outputTensorInfo,
armnn::TensorInfo  weightsDesc,
armnn::TensorInfo  biasesDesc,
boost::multi_array< T, 2 > &  weights,
boost::multi_array< B, 1 > &  bias,
boost::multi_array< T, 4 > &  input,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 24 of file FullyConnectedTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFullyConnected(), ITensorHandleFactory::CreateTensorHandle(), FullyConnectedQueueDescriptor::m_Bias, FullyConnectedDescriptor::m_BiasEnabled, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, FullyConnectedDescriptor::m_TransposeWeightMatrix, FullyConnectedQueueDescriptor::m_Weight, and LayerTestResult< T, n >::output.

37 {
38  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
39  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
40 
41  armnn::FullyConnectedQueueDescriptor data;
42  armnn::WorkloadInfo info;
43  armnn::ScopedTensorHandle weightsTensor(weightsDesc);
44  armnn::ScopedTensorHandle biasTensor(biasesDesc);
45 
46  AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
47  AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
48 
49  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
50  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
51  data.m_Weight = &weightsTensor;
52  data.m_Bias = &biasTensor;
53  data.m_Parameters.m_BiasEnabled = biasEnabled;
54  data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
55 
56  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
57  LayerTestResult<T, 2> result(outputTensorInfo);
58 
59  inputHandle->Allocate();
60  outputHandle->Allocate();
61  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
62 
63  ExecuteWorkload(*workload, memoryManager);
64 
65  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
66 
67  return result;
68 }
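The AddInputToWorkload/AddOutputToWorkload calls above come from ArmNN's shared workload test utilities, not from this file. As a rough sketch of what they do (an assumption based on the QueueDescriptor/WorkloadInfo structure): each call pairs a tensor handle on the queue descriptor with its TensorInfo on the WorkloadInfo, which CreateFullyConnected then validates against.

// Sketch (assumed behaviour, simplified): registering an input with both
// the queue descriptor and the WorkloadInfo before workload creation.
template <typename QueueDescriptorType>
void AddInputToWorkloadSketch(QueueDescriptorType& data,
                              armnn::WorkloadInfo& info,
                              const armnn::TensorInfo& tensorInfo,
                              armnn::ITensorHandle* tensorHandle)
{
    data.m_Inputs.push_back(tensorHandle);          // handle consumed at execute time
    info.m_InputTensorInfos.push_back(tensorInfo);  // shape/type used for validation
}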

◆ SimpleFullyConnectedTestWeightsAsInputsImpl()

template<typename T , typename B >
LayerTestResult<T, 2> SimpleFullyConnectedTestWeightsAsInputsImpl ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
armnn::TensorInfo  inputTensorInfo,
armnn::TensorInfo  outputTensorInfo,
armnn::TensorInfo  weightsTensorInfo,
armnn::TensorInfo  biasesTensorInfo,
boost::multi_array< T, 2 > &  weights,
boost::multi_array< B, 1 > &  bias,
boost::multi_array< T, 4 > &  input,
bool  biasEnabled,
bool  transposeWeights 
)

Definition at line 71 of file FullyConnectedTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateFullyConnected(), ITensorHandleFactory::CreateTensorHandle(), FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, FullyConnectedDescriptor::m_TransposeWeightMatrix, and LayerTestResult< T, n >::output.

84 {
85  std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
86  std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
87  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
88 
89  armnn::FullyConnectedQueueDescriptor data;
90  armnn::WorkloadInfo info;
91 
92  AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
93  AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
94  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
95  data.m_Parameters.m_BiasEnabled = biasEnabled;
96  data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
97  data.m_Parameters.m_ConstantWeights = false;
98 
99  std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
100  if (biasEnabled)
101  {
102  input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
103  AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
104  }
105 
106  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
107  LayerTestResult<T, 2> result(outputTensorInfo);
108 
109  input0Handle->Allocate();
110  input1Handle->Allocate();
111  outputHandle->Allocate();
112  CopyDataToITensorHandle(input0Handle.get(), &input[0][0][0][0]);
113  CopyDataToITensorHandle(input1Handle.get(), &weights[0][0]);
114  if (biasEnabled)
115  {
116  input2Handle->Allocate();
117  CopyDataToITensorHandle(input2Handle.get(), &bias[0]);
118  }
119 
120  ExecuteWorkload(*workload, memoryManager);
121 
122  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
123 
124  return result;
125 }
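This variant feeds the weights (input 1) and, when biasEnabled is set, the biases (input 2) through ordinary input tensors rather than ScopedTensorHandles, and flags that with m_ConstantWeights = false. It is reached from FullyConnectedTest when constantWeights is false; an illustrative call follows (the wrapper is an assumption, not code from this file):

// Illustrative only: exercising the non-constant-weights path via
// FullyConnectedTest with constantWeights = false.
LayerTestResult<uint8_t, 2> RunFullyConnectedNonConstWeights(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // biasEnabled = true, constantWeights = false: routes through
    // SimpleFullyConnectedTestWeightsAsInputsImpl.
    return FullyConnectedTest<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, true, false);
}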