ArmNN
 21.02
TransposeConvolution2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
#include "TransposeConvolution2dTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <string>
#include <utility>
#include <vector>
28 
29 namespace
30 {
31 
// Pairs a tensor's metadata (shape, data type, quantization parameters) with
// the flat buffer of elements backing it.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
34 
35 template<typename T>
36 void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
37 {
38  if (data.first.GetNumElements() > data.second.size())
39  {
40  throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
41  std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
42  }
43 }
44 
45 template<typename T, typename BT>
46 void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
48  const armnn::ITensorHandleFactory& tensorHandleFactory,
50  const TensorData<T>& input,
51  TensorData<T>& output,
52  const TensorData<T>& weights,
53  const armnn::Optional<TensorData<BT>>& biases)
54 {
55  IgnoreUnused(memoryManager);
56  using namespace armnn;
57 
58  VerifyInputTensorData(input, "input");
59  VerifyInputTensorData(weights, "biases");
60 
61  if (descriptor.m_BiasEnabled)
62  {
63  if (!biases.has_value())
64  {
65  throw InvalidArgumentException("Bias enabled but no bias data provided");
66  }
67  VerifyInputTensorData(biases.value(), "biases");
68  }
69 
70  // set up weights
71  ScopedCpuTensorHandle weightsTensor(weights.first);
72 
74  queueDescriptor.m_Parameters = descriptor;
75  queueDescriptor.m_Weight = &weightsTensor;
76 
77  AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
78 
79  std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
80  if (descriptor.m_BiasEnabled)
81  {
82  // set up biases
83  biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
84  queueDescriptor.m_Bias = biasesTensor.get();
85 
86  AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
87  }
88 
89  // set up input and output handles
90  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(input.first);
91  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(output.first);
92 
93  // set up workload
94  armnn::WorkloadInfo workloadInfo;
95  AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
96  AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
97 
98  std::unique_ptr<armnn::IWorkload> workload =
99  workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
100 
101  inputHandle->Allocate();
102  outputHandle->Allocate();
103 
104  CopyDataToITensorHandle(inputHandle.get(), input.second.data());
105 
106  ExecuteWorkload(*workload, memoryManager);
107 
108  // copy output
109  output.second = std::vector<T>(output.first.GetNumElements(), T());
110  CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
111 }
112 
113 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
114 LayerTestResult<T, 4> TransposeConvolution2dTest(
115  armnn::IWorkloadFactory& workloadFactory,
117  const armnn::ITensorHandleFactory& tensorHandleFactory,
118  const armnn::TransposeConvolution2dDescriptor& descriptor,
119  armnn::TensorInfo& inputInfo,
120  const std::vector<float>& inputData,
121  armnn::TensorInfo& outputInfo,
122  const std::vector<float>& expectedOutputData,
123  armnn::TensorInfo& weightsInfo,
124  const std::vector<float>& weightsData,
125  armnn::TensorInfo& biasesInfo,
126  const std::vector<float>& biasesData)
127 {
128  using namespace armnn;
129 
130  // set up quantization parameters
131  if (armnn::IsQuantizedType<T>())
132  {
133  constexpr float qScale = 0.50f;
134  constexpr int32_t qOffset = 10;
135 
136  inputInfo.SetQuantizationScale(qScale);
137  inputInfo.SetQuantizationOffset(qOffset);
138 
139  outputInfo.SetQuantizationScale(qScale);
140  outputInfo.SetQuantizationOffset(qOffset);
141 
142  weightsInfo.SetQuantizationScale(qScale);
143  weightsInfo.SetQuantizationOffset(qOffset);
144 
145  biasesInfo.SetQuantizationScale(qScale * qScale);
146  biasesInfo.SetQuantizationOffset(0);
147  }
148 
149  // set up input
150  TensorData<T> input =
151  {
152  inputInfo,
153  armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
154  };
155 
156  // set up weights
157  TensorData<T> weights =
158  {
159  weightsInfo,
160  armnnUtils::QuantizedVector<T>(weightsData,
161  weightsInfo.GetQuantizationScale(),
162  weightsInfo.GetQuantizationOffset())
163  };
164 
165  // set up biases
167  Optional<TensorData<BT>> optionalBiases;
168  if (descriptor.m_BiasEnabled)
169  {
170  TensorData<BT> biases =
171  {
172  biasesInfo,
173  armnnUtils::QuantizedVector<BT>(biasesData,
174  biasesInfo.GetQuantizationScale(),
175  biasesInfo.GetQuantizationOffset())
176  };
177 
178  optionalBiases = Optional<TensorData<BT>>(biases);
179  }
180 
181  // set up output
182  TensorData<T> output = { outputInfo, {} };
183 
184  // execute test
185  TransposeConvolution2dTestImpl(workloadFactory,
186  memoryManager,
187  tensorHandleFactory,
188  descriptor,
189  input,
190  output,
191  weights,
192  optionalBiases);
193 
194  // construct result object
195  LayerTestResult<T, 4> testResult(outputInfo);
196  testResult.output = MakeTensor<T, 4>(outputInfo, output.second);
197  testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
198  armnnUtils::QuantizedVector<T>(expectedOutputData,
199  outputInfo.GetQuantizationScale(),
200  outputInfo.GetQuantizationOffset()));
201 
202  return testResult;
203 }
204 
205 template<typename T>
206 void SwizzleData(armnn::TensorInfo& inputInfo,
207  std::vector<T>& inputData,
208  armnn::TensorInfo& outputInfo,
209  std::vector<T>& outputData,
210  armnn::TensorInfo& weightsInfo,
211  std::vector<T>& weightsData)
212 {
213  PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
214  PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
215  PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
216 }
217 
218 } // anonymous namespace
219 
220 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
222  armnn::IWorkloadFactory& workloadFactory,
224  const armnn::ITensorHandleFactory& tensorHandleFactory,
225  bool biasEnabled,
226  const armnn::DataLayout layout)
227 {
228  using namespace armnn;
229 
230  constexpr unsigned int batches = 1u;
231  constexpr unsigned int channels = 1u;
232 
233  constexpr unsigned int wInput = 3u;
234  constexpr unsigned int hInput = wInput;
235 
236  constexpr unsigned int wOutput = 5u;
237  constexpr unsigned int hOutput = wOutput;
238 
239  constexpr unsigned int wWeights = 3u;
240  constexpr unsigned int hWeights = wWeights;
241 
242  TensorShape inputShape = { batches, channels, hInput, wInput };
243  TensorShape outputShape = { batches, channels, hOutput, wOutput };
244  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
245 
246  TensorInfo inputInfo(inputShape, ArmnnType);
247  TensorInfo outputInfo(outputShape, ArmnnType);
248  TensorInfo weightsInfo(weightsShape, ArmnnType);
249  TensorInfo biasesInfo({ channels }, ArmnnBType);
250 
251  std::vector<float> inputData =
252  {
253  1.f, 1.f, 1.f,
254  1.f, 1.f, 1.f,
255  1.f, 1.f, 1.f
256  };
257 
258  std::vector<float> weightsData =
259  {
260  1.f, 2.f, 3.f,
261  4.f, 5.f, 6.f,
262  7.f, 8.f, 9.f
263  };
264 
265  std::vector<float> biasesData = { 1.f };
266 
267  std::vector<float> expectedOutputData =
268  {
269  1.f, 3.f, 6.f, 5.f, 3.f,
270  5.f, 12.f, 21.f, 16.f, 9.f,
271  12.f, 27.f, 45.f, 33.f, 18.f,
272  11.f, 24.f, 39.f, 28.f, 15.f,
273  7.f, 15.f, 24.f, 17.f, 9.f
274  };
275 
276  if (biasEnabled)
277  {
278  // apply bias to expected output data
279  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
280  [&](float f) -> float { return f + biasesData[0]; });
281  }
282 
284  descriptor.m_StrideX = 1;
285  descriptor.m_StrideY = 1;
286  descriptor.m_BiasEnabled = biasEnabled;
287  descriptor.m_DataLayout = layout;
288 
289  // swizzle data if needed
290  if (layout == armnn::DataLayout::NHWC)
291  {
292  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
293  }
294 
295  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
296  memoryManager,
297  tensorHandleFactory,
298  descriptor,
299  inputInfo,
300  inputData,
301  outputInfo,
302  expectedOutputData,
303  weightsInfo,
304  weightsData,
305  biasesInfo,
306  biasesData);
307 }
308 
309 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
311  armnn::IWorkloadFactory& workloadFactory,
313  const armnn::ITensorHandleFactory& tensorHandleFactory,
314  bool biasEnabled,
315  const armnn::DataLayout layout)
316 {
317  using namespace armnn;
318 
319  constexpr unsigned int batches = 1u;
320  constexpr unsigned int channels = 1u;
321 
322  constexpr unsigned int wInput = 4u;
323  constexpr unsigned int hInput = wInput;
324 
325  constexpr unsigned int wOutput = 2u;
326  constexpr unsigned int hOutput = wOutput;
327 
328  constexpr unsigned int wWeights = 3u;
329  constexpr unsigned int hWeights = wWeights;
330 
331  TensorShape inputShape = { batches, channels, hInput, wInput };
332  TensorShape outputShape = { batches, channels, hOutput, wOutput };
333  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
334 
335  TensorInfo inputInfo(inputShape, ArmnnType);
336  TensorInfo outputInfo(outputShape, ArmnnType);
337  TensorInfo weightsInfo(weightsShape, ArmnnType);
338  TensorInfo biasesInfo({ channels }, ArmnnBType);
339 
340  std::vector<float> inputData =
341  {
342  1.f, 3.f, 2.f, 1.f,
343  1.f, 3.f, 3.f, 1.f,
344  2.f, 1.f, 1.f, 3.f,
345  3.f, 2.f, 3.f, 3.f
346  };
347 
348  std::vector<float> weightsData =
349  {
350  1.f, 2.f, 3.f,
351  0.f, 1.f, 0.f,
352  2.f, 1.f, 2.f
353  };
354 
355  std::vector<float> biasesData = { 1.f };
356 
357  std::vector<float> expectedOutputData =
358  {
359  21.f, 21.f,
360  28.f, 27.f
361  };
362 
363  if (biasEnabled)
364  {
365  // apply bias to expected output data
366  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
367  [&](float f) -> float { return f + biasesData[0]; });
368  }
369 
371  descriptor.m_PadLeft = 2;
372  descriptor.m_PadRight = 2;
373  descriptor.m_PadTop = 2;
374  descriptor.m_PadBottom = 2;
375  descriptor.m_StrideX = 1;
376  descriptor.m_StrideY = 1;
377  descriptor.m_BiasEnabled = biasEnabled;
378  descriptor.m_DataLayout = layout;
379 
380  // swizzle data if needed
381  if (layout == armnn::DataLayout::NHWC)
382  {
383  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
384  }
385 
386  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
387  memoryManager,
388  tensorHandleFactory,
389  descriptor,
390  inputInfo,
391  inputData,
392  outputInfo,
393  expectedOutputData,
394  weightsInfo,
395  weightsData,
396  biasesInfo,
397  biasesData);
398 }
399 
400 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
402  armnn::IWorkloadFactory& workloadFactory,
404  const armnn::ITensorHandleFactory& tensorHandleFactory,
405  bool biasEnabled,
406  const armnn::DataLayout layout)
407 {
408  using namespace armnn;
409 
410  constexpr unsigned int batches = 1u;
411  constexpr unsigned int channels = 1u;
412 
413  constexpr unsigned int wInput = 3u;
414  constexpr unsigned int hInput = wInput;
415 
416  constexpr unsigned int wOutput = 7u;
417  constexpr unsigned int hOutput = wOutput;
418 
419  constexpr unsigned int wWeights = 3u;
420  constexpr unsigned int hWeights = wWeights;
421 
422  TensorShape inputShape = { batches, channels, hInput, wInput };
423  TensorShape outputShape = { batches, channels, hOutput, wOutput };
424  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
425 
426  TensorInfo inputInfo(inputShape, ArmnnType);
427  TensorInfo outputInfo(outputShape, ArmnnType);
428  TensorInfo weightsInfo(weightsShape, ArmnnType);
429  TensorInfo biasesInfo({ channels }, ArmnnBType);
430 
431  std::vector<float> inputData =
432  {
433  1.f, 1.f, 1.f,
434  1.f, 1.f, 1.f,
435  1.f, 1.f, 1.f
436  };
437 
438  std::vector<float> weightsData =
439  {
440  1.f, 2.f, 3.f,
441  4.f, 5.f, 6.f,
442  7.f, 8.f, 9.f
443  };
444 
445  std::vector<float> biasesData = { 1.f };
446 
447  std::vector<float> expectedOutputData =
448  {
449  1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
450  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
451  8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
452  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
453  8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
454  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
455  7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
456  };
457 
458  if (biasEnabled)
459  {
460  // apply bias to expected output data
461  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
462  [&](float f) -> float { return f + biasesData[0]; });
463  }
464 
466  descriptor.m_StrideX = 2;
467  descriptor.m_StrideY = 2;
468  descriptor.m_BiasEnabled = biasEnabled;
469  descriptor.m_DataLayout = layout;
470 
471  // swizzle data if needed
472  if (layout == armnn::DataLayout::NHWC)
473  {
474  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
475  }
476 
477  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
478  memoryManager,
479  tensorHandleFactory,
480  descriptor,
481  inputInfo,
482  inputData,
483  outputInfo,
484  expectedOutputData,
485  weightsInfo,
486  weightsData,
487  biasesInfo,
488  biasesData);
489 }
490 
491 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
493  armnn::IWorkloadFactory& workloadFactory,
495  const armnn::ITensorHandleFactory& tensorHandleFactory,
496  const armnn::DataLayout layout)
497 {
498  using namespace armnn;
499 
500  TensorShape inputShape = { 1, 1, 2, 2 };
501  TensorShape outputShape = { 1, 2, 5, 5 };
502 
503  // OIHW for NCHW; OHWI for NHWC
504  TensorShape weightsShape = { 2, 1, 3, 3 };
505  TensorShape biasesShape = { 2 };
506 
507  TensorInfo inputInfo(inputShape, ArmnnType);
508  TensorInfo outputInfo(outputShape, ArmnnType);
509  TensorInfo weightsInfo(weightsShape, ArmnnType);
510  TensorInfo biasesInfo(biasesShape, ArmnnBType);
511 
512  std::vector<float> inputData =
513  {
514  1.f, 2.f,
515  3.f, 4.f,
516  };
517 
518  std::vector<float> weightsData =
519  {
520  1.f, 3.f, 5.f,
521  7.f, 9.f, 11.f,
522  13.f, 15.f, 17.f,
523 
524  2.f, 4.f, 6.f,
525  8.f, 10.f, 12.f,
526  14.f, 16.f, 18.f
527  };
528 
529  std::vector<float> biasesData = { -1.5f, -2.0f };
530 
531  std::vector<float> expectedOutputData =
532  {
533  -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
534  5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
535  14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
536  19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
537  37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
538 
539  0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
540  6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
541  18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
542  22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
543  40.0f, 46.0f, 108.0f, 62.0f, 70.0f
544  };
545 
547  descriptor.m_StrideX = 2;
548  descriptor.m_StrideY = 2;
549  descriptor.m_BiasEnabled = true;
550  descriptor.m_DataLayout = layout;
551 
552  // swizzle data if needed
553  if (layout == armnn::DataLayout::NHWC)
554  {
555  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
556  }
557 
558  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
559  memoryManager,
560  tensorHandleFactory,
561  descriptor,
562  inputInfo,
563  inputData,
564  outputInfo,
565  expectedOutputData,
566  weightsInfo,
567  weightsData,
568  biasesInfo,
569  biasesData);
570 }
571 
573  armnn::IWorkloadFactory& workloadFactory,
575  const armnn::ITensorHandleFactory& tensorHandleFactory,
576  const armnn::DataLayout layout)
577 {
578  using namespace armnn;
579 
580  const DataType inputType = DataType::QAsymmU8;
581  const DataType kernelType = DataType::QSymmS8;
582  const DataType biasType = DataType::Signed32;
583 
584  TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
585  TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);
586 
587  const std::vector<float> quantScales{ 0.25f, 0.5f };
588  constexpr unsigned int quantDimension = 0;
589 
590  TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);
591 
592  const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
593  TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);
594 
595  std::vector<uint8_t> inputData =
596  {
597  12, 14,
598  16, 18
599  };
600 
601  std::vector<int8_t> kernelData =
602  {
603  4, 12, 20,
604  28, 36, 44,
605  52, 60, 68,
606 
607  4, 8, 12,
608  16, 20, 24,
609  28, 32, 36
610  };
611 
612  std::vector<int32_t> biasData = { -12, -8 };
613 
614  std::vector<uint8_t> expectedOutputData =
615  {
616  9, 13, 21, 19, 27,
617  21, 25, 57, 43, 51,
618  39, 55, 131, 91, 115,
619  49, 61, 129, 79, 95,
620  85, 97, 213, 127, 143,
621 
622  10, 14, 26, 22, 30,
623  22, 26, 62, 46, 54,
624  46, 62, 150, 102, 126,
625  54, 66, 142, 86, 102,
626  90, 102, 226, 134, 150
627  };
628 
629  if (layout == DataLayout::NHWC)
630  {
631  PermuteTensorNchwToNhwc(inputInfo, inputData);
632  PermuteTensorNchwToNhwc(kernelInfo, kernelData);
633  PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
634  }
635 
637  descriptor.m_StrideX = 2;
638  descriptor.m_StrideY = 2;
639  descriptor.m_BiasEnabled = true;
640  descriptor.m_DataLayout = layout;
641 
642  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
643  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
644 
645  WorkloadInfo workloadInfo;
646  ScopedCpuTensorHandle weightTensor(kernelInfo);
647  ScopedCpuTensorHandle biasTensor(biasInfo);
648 
649  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
650  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
651 
653  queueDescriptor.m_Parameters = descriptor;
654  queueDescriptor.m_Weight = &weightTensor;
655  queueDescriptor.m_Bias = &biasTensor;
656 
657  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
658  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
659 
660  std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
661  inputHandle->Allocate();
662  outputHandle->Allocate();
663 
664  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
665 
666  ExecuteWorkload(*workload, memoryManager);
667 
668  LayerTestResult<uint8_t, 4> ret(outputInfo);
669  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
670  ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);
671 
672  return ret;
673 }
674 
675 //
676 // Explicit template specializations
677 //
678 
680 SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
681  armnn::IWorkloadFactory& workloadFactory,
683  const armnn::ITensorHandleFactory& tensorHandleFactory,
684  bool biasEnabled,
685  const armnn::DataLayout layout);
686 
688 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
689  armnn::IWorkloadFactory& workloadFactory,
691  const armnn::ITensorHandleFactory& tensorHandleFactory,
692  bool biasEnabled,
693  const armnn::DataLayout layout);
694 
696 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
697  armnn::IWorkloadFactory& workloadFactory,
699  const armnn::ITensorHandleFactory& tensorHandleFactory,
700  bool biasEnabled,
701  const armnn::DataLayout layout);
702 
704 SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
705  armnn::IWorkloadFactory& workloadFactory,
707  const armnn::ITensorHandleFactory& tensorHandleFactory,
708  bool biasEnabled,
709  const armnn::DataLayout layout);
710 
712 PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
713  armnn::IWorkloadFactory& workloadFactory,
715  const armnn::ITensorHandleFactory& tensorHandleFactory,
716  bool biasEnabled,
717  const armnn::DataLayout layout);
718 
719 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
720 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
721  armnn::IWorkloadFactory& workloadFactory,
723  const armnn::ITensorHandleFactory& tensorHandleFactory,
724  bool biasEnabled,
725  const armnn::DataLayout layout);
726 
727 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
728 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
729  armnn::IWorkloadFactory& workloadFactory,
731  const armnn::ITensorHandleFactory& tensorHandleFactory,
732  bool biasEnabled,
733  const armnn::DataLayout layout);
734 
735 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
736 PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
737  armnn::IWorkloadFactory& workloadFactory,
739  const armnn::ITensorHandleFactory& tensorHandleFactory,
740  bool biasEnabled,
741  const armnn::DataLayout layout);
742 
743 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
744 StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
745  armnn::IWorkloadFactory& workloadFactory,
747  const armnn::ITensorHandleFactory& tensorHandleFactory,
748  bool biasEnabled,
749  const armnn::DataLayout layout);
750 
751 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
752 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
753  armnn::IWorkloadFactory& workloadFactory,
755  const armnn::ITensorHandleFactory& tensorHandleFactory,
756  bool biasEnabled,
757  const armnn::DataLayout layout);
758 
759 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
760 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
761  armnn::IWorkloadFactory& workloadFactory,
763  const armnn::ITensorHandleFactory& tensorHandleFactory,
764  bool biasEnabled,
765  const armnn::DataLayout layout);
766 
767 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
768 StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
769  armnn::IWorkloadFactory& workloadFactory,
771  const armnn::ITensorHandleFactory& tensorHandleFactory,
772  bool biasEnabled,
773  const armnn::DataLayout layout);
774 
775 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
776 MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
777  armnn::IWorkloadFactory& workloadFactory,
779  const armnn::ITensorHandleFactory& tensorHandleFactory,
780  const armnn::DataLayout layout);
781 
782 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
783 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
784  armnn::IWorkloadFactory& workloadFactory,
786  const armnn::ITensorHandleFactory& tensorHandleFactory,
787  const armnn::DataLayout layout);
788 
789 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
790 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
791  armnn::IWorkloadFactory& workloadFactory,
793  const armnn::ITensorHandleFactory& tensorHandleFactory,
794  const armnn::DataLayout layout);
795 
796 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
797 MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
798  armnn::IWorkloadFactory& workloadFactory,
800  const armnn::ITensorHandleFactory& tensorHandleFactory,
801  const armnn::DataLayout layout);
DataLayout
Definition: Types.hpp:50
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
bool m_BiasEnabled
Enable/disable bias.
boost::multi_array< T, n > outputExpected
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:73
LayerTestResult< T, 4 > SimpleTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > MultiChannelTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void PermuteTensorNchwToNhwc(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
DataType
Definition: Types.hpp:32
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
float GetQuantizationScale() const
Definition: Tensor.cpp:452
LayerTestResult< T, 4 > StridedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
boost::multi_array< T, n > output
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > TransposeConvolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< T, 4 > PaddedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)