ArmNN 20.02: TransposeConvolution2dTestImpl.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <string>
#include <utility>
#include <vector>

namespace
{

template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;

template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
    }
}

template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    IgnoreUnused(memoryManager);
    using namespace armnn;

    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights
    ScopedCpuTensorHandle weightsTensor(weights.first);

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightsTensor;

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        // set up biases
        biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
        queueDescriptor.m_Bias = biasesTensor.get();

        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
    }

    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);

    // set up workload
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy output
    output.second = std::vector<T>(output.first.GetNumElements(), 0.0f);
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}
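
// A minimal usage sketch (informative comment, not part of the original tests):
// driving the helper above directly against the reference backend. The descriptor
// and tensor values here are illustrative assumptions, not fixtures from this file.
//
//     armnn::RefWorkloadFactory workloadFactory;
//     armnn::TransposeConvolution2dDescriptor descriptor;
//     descriptor.m_StrideX = 1;
//     descriptor.m_StrideY = 1;
//
//     armnn::TensorInfo inputInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
//     armnn::TensorInfo outputInfo({ 1, 1, 5, 5 }, armnn::DataType::Float32);
//     armnn::TensorInfo weightsInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
//
//     TensorData<float> input   = { inputInfo,   std::vector<float>(9, 1.f) };
//     TensorData<float> weights = { weightsInfo, std::vector<float>(9, 1.f) };
//     TensorData<float> output  = { outputInfo,  {} };
//
//     TransposeConvolution2dTestImpl<float, float>(
//         workloadFactory, nullptr, descriptor, input, output, weights, armnn::EmptyOptional());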

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float qScale = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }
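
    // Worked example of the quantization above (informative comment, not original code):
    // values are stored as q = round(value / scale) + offset, so with qScale = 0.5 and
    // qOffset = 10 a real value of 1.0f is stored as 12. The bias scale is set to
    // inputScale * weightsScale (here qScale * qScale = 0.25) with a zero offset, which
    // is the convention quantized convolution biases follow.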

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases
    using BT = armnn::ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                                                outputInfo.GetQuantizationScale(),
                                                                                outputInfo.GetQuantizationOffset()));

    return testResult;
}

template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}
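
// Note (informative comment, not original code): PermuteTensorNchwToNhwc rewrites both
// the TensorInfo shape and the element order, moving element (n, c, h, w) to
// (n, h, w, c); a { 1, 2, 3, 3 } NCHW tensor becomes { 1, 3, 3, 2 } NHWC. In the
// single-channel tests below only the shape metadata effectively changes, since with
// C == 1 the NCHW and NHWC element orders coincide.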

} // anonymous namespace

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
         1.f,  3.f,  6.f,  5.f,  3.f,
         5.f, 12.f, 21.f, 16.f,  9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
         7.f, 15.f, 24.f, 17.f,  9.f
    };
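
    // How the expected values above arise (informative comment, not original code):
    // a transpose convolution scatters each input element, multiplied by the kernel,
    // into the output at (y + ky, x + kx) for stride 1. With an all-ones 3x3 input:
    //   output(0, 0) = 1 * kernel(0, 0)                    = 1
    //   output(0, 1) = 1 * kernel(0, 1) + 1 * kernel(0, 0) = 3
    //   output(2, 2) = sum of all nine kernel taps         = 45 (the centre value)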

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 2;
    descriptor.m_PadRight    = 2;
    descriptor.m_PadTop      = 2;
    descriptor.m_PadBottom   = 2;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;
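
    // Why a 4x4 input yields a 2x2 output here (informative comment, not original
    // code): for transpose convolution the output extent per dimension is
    //     stride * (inputSize - 1) + kernelSize - padLow - padHigh
    // which with stride 1, inputSize 4, kernelSize 3 and padding 2 + 2 gives
    // 1 * 3 + 3 - 4 = 2. The same formula gives the 5x5 and 7x7 outputs of the
    // unpadded tests in this file.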

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f,  2.f,  4.f,  2.f,  4.f,  2.f,  3.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        7.f,  8.f, 16.f,  8.f, 16.f,  8.f,  9.f
    };
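
    // Spot check for the stride-2 values above (informative comment, not original
    // code): with stride 2 each input element scatters to (2 * y + ky, 2 * x + kx), so
    //   output(0, 0) = 1 * kernel(0, 0)                    = 1
    //   output(0, 2) = 1 * kernel(0, 2) + 1 * kernel(0, 0) = 4
    // Interior even columns and rows sum two overlapping kernel taps; the rest get one.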

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape  = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape  = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f,
    };

    std::vector<float> weightsData =
    {
         1.f,  3.f,  5.f,
         7.f,  9.f, 11.f,
        13.f, 15.f, 17.f,

         2.f,  4.f,  6.f,
         8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    std::vector<float> biasesData = { -1.5f, -2.0f };

    std::vector<float> expectedOutputData =
    {
        -0.5f,  1.5f,   5.5f,  4.5f,  8.5f,
         5.5f,  7.5f,  23.5f, 16.5f, 20.5f,
        14.5f, 22.5f,  60.5f, 40.5f, 52.5f,
        19.5f, 25.5f,  59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

         0.0f,  2.0f,   8.0f,  6.0f, 10.0f,
         6.0f,  8.0f,  26.0f, 18.0f, 22.0f,
        18.0f, 26.0f,  70.0f, 46.0f, 58.0f,
        22.0f, 28.0f,  66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };
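
    // Spot check for the values above (informative comment, not original code): with
    // stride 2 only input(0, 0) reaches output(0, 0), so for output channel 0
    //   output0(0, 0) = 1 * kernel0(0, 0) + bias0 = 1 * 1 + (-1.5) = -0.5
    // and for output channel 1
    //   output1(0, 0) = 1 * kernel1(0, 0) + bias1 = 1 * 2 + (-2.0) =  0.0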

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
         4, 12, 20,
        28, 36, 44,
        52, 60, 68,

         4,  8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
         9,  13,  21,  19,  27,
        21,  25,  57,  43,  51,
        39,  55, 131,  91, 115,
        49,  61, 129,  79,  95,
        85,  97, 213, 127, 143,

        10,  14,  26,  22,  30,
        22,  26,  62,  46,  54,
        46,  62, 150, 102, 126,
        54,  66, 142,  86, 102,
        90, 102, 226, 134, 150
    };
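
    // How the per-axis data above decodes (informative comment, not original code):
    // the input dequantizes as (q - 10) * 0.5, so { 12, 14, 16, 18 } is { 1, 2, 3, 4 }.
    // The kernel is symmetric per channel: channel 0 (scale 0.25) stores { 4, 12, 20, ... }
    // for real { 1, 3, 5, ... }; channel 1 (scale 0.5) stores { 4, 8, 12, ... } for
    // { 2, 4, 6, ... }. Each bias scale is inputScale * kernelScale, so -12 * 0.125 = -1.5
    // and -8 * 0.25 = -2.0. These are exactly the float values used in
    // MultiChannelTransposeConvolution2dTest, e.g. expected output 9 decodes to
    // (9 - 10) * 0.5 = -0.5.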

    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightTensor;
    queueDescriptor.m_Bias       = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}

//
// Explicit template specializations
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);