ArmNN
 21.11
TransposeConvolution2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <QuantizeHelper.hpp>
9 
10 
11 #include <armnnUtils/Permute.hpp>
12 
14 
18 
20 
21 #include <test/TensorHelpers.hpp>
22 
23 #include <doctest/doctest.h>
24 
25 #include <string>
26 #include <utility>
27 #include <vector>
28 
29 namespace
30 {
31 
// Pairs a tensor's metadata (TensorInfo) with its backing element data.
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;
34 
35 template<typename T>
36 void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
37 {
38  if (data.first.GetNumElements() > data.second.size())
39  {
40  throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
41  std::to_string(data.first.GetNumElements()) + "but got " + std::to_string(data.second.size()));
42  }
43 }
44 
45 template<typename T, typename BT>
46 void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
48  const armnn::ITensorHandleFactory& tensorHandleFactory,
50  const TensorData<T>& input,
51  TensorData<T>& output,
52  const TensorData<T>& weights,
53  const armnn::Optional<TensorData<BT>>& biases)
54 {
55  IgnoreUnused(memoryManager);
56  using namespace armnn;
57 
58  VerifyInputTensorData(input, "input");
59  VerifyInputTensorData(weights, "biases");
60 
61  if (descriptor.m_BiasEnabled)
62  {
63  if (!biases.has_value())
64  {
65  throw InvalidArgumentException("Bias enabled but no bias data provided");
66  }
67  VerifyInputTensorData(biases.value(), "biases");
68  }
69 
70  // set up weights
71  ScopedTensorHandle weightsTensor(weights.first);
72 
74  queueDescriptor.m_Parameters = descriptor;
75  queueDescriptor.m_Weight = &weightsTensor;
76 
77  AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());
78 
79  std::unique_ptr<ScopedTensorHandle> biasesTensor;
80  if (descriptor.m_BiasEnabled)
81  {
82  // set up biases
83  biasesTensor = std::make_unique<ScopedTensorHandle>(biases.value().first);
84  queueDescriptor.m_Bias = biasesTensor.get();
85 
86  AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
87  }
88 
89  // set up input and output handles
90  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(input.first);
91  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(output.first);
92 
93  // set up workload
94  armnn::WorkloadInfo workloadInfo;
95  AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
96  AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
97 
98  std::unique_ptr<armnn::IWorkload> workload =
99  workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
100 
101  inputHandle->Allocate();
102  outputHandle->Allocate();
103 
104  CopyDataToITensorHandle(inputHandle.get(), input.second.data());
105 
106  ExecuteWorkload(*workload, memoryManager);
107 
108  // copy output
109  output.second = std::vector<T>(output.first.GetNumElements(), T());
110  CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
111 }
112 
113 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
114 LayerTestResult<T, 4> TransposeConvolution2dTest(
115  armnn::IWorkloadFactory& workloadFactory,
117  const armnn::ITensorHandleFactory& tensorHandleFactory,
118  const armnn::TransposeConvolution2dDescriptor& descriptor,
119  armnn::TensorInfo& inputInfo,
120  const std::vector<float>& inputData,
121  armnn::TensorInfo& outputInfo,
122  const std::vector<float>& expectedOutputData,
123  armnn::TensorInfo& weightsInfo,
124  const std::vector<float>& weightsData,
125  armnn::TensorInfo& biasesInfo,
126  const std::vector<float>& biasesData)
127 {
128  using namespace armnn;
129 
130  // set up quantization parameters
131  if (armnn::IsQuantizedType<T>())
132  {
133  constexpr float qScale = 0.50f;
134  constexpr int32_t qOffset = 10;
135 
136  inputInfo.SetQuantizationScale(qScale);
137  inputInfo.SetQuantizationOffset(qOffset);
138 
139  outputInfo.SetQuantizationScale(qScale);
140  outputInfo.SetQuantizationOffset(qOffset);
141 
142  weightsInfo.SetQuantizationScale(qScale);
143  weightsInfo.SetQuantizationOffset(qOffset);
144 
145  biasesInfo.SetQuantizationScale(qScale * qScale);
146  biasesInfo.SetQuantizationOffset(0);
147  }
148 
149  // set up input
150  TensorData<T> input =
151  {
152  inputInfo,
153  armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
154  };
155 
156  // set up weights
157  TensorData<T> weights =
158  {
159  weightsInfo,
160  armnnUtils::QuantizedVector<T>(weightsData,
161  weightsInfo.GetQuantizationScale(),
162  weightsInfo.GetQuantizationOffset())
163  };
164 
165  // set up biases
167  Optional<TensorData<BT>> optionalBiases;
168  if (descriptor.m_BiasEnabled)
169  {
170  TensorData<BT> biases =
171  {
172  biasesInfo,
173  armnnUtils::QuantizedVector<BT>(biasesData,
174  biasesInfo.GetQuantizationScale(),
175  biasesInfo.GetQuantizationOffset())
176  };
177 
178  optionalBiases = Optional<TensorData<BT>>(biases);
179  }
180 
181  // set up output
182  TensorData<T> output = { outputInfo, {} };
183 
184  // execute test
185  TransposeConvolution2dTestImpl(workloadFactory,
186  memoryManager,
187  tensorHandleFactory,
188  descriptor,
189  input,
190  output,
191  weights,
192  optionalBiases);
193 
194  // construct result object
195  LayerTestResult<T, 4> testResult(outputInfo);
196  testResult.m_ActualData = output.second;
197  testResult.m_ExpectedData = armnnUtils::QuantizedVector<T>(expectedOutputData,
198  outputInfo.GetQuantizationScale(),
199  outputInfo.GetQuantizationOffset());
200 
201  return testResult;
202 }
203 
// Converts the three NCHW-ordered tensors (input, output, weights) to NHWC
// in place: each call permutes both the TensorInfo shape and the element
// order of the accompanying data vector.
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}
216 
217 } // anonymous namespace
218 
219 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
221  armnn::IWorkloadFactory& workloadFactory,
223  const armnn::ITensorHandleFactory& tensorHandleFactory,
224  bool biasEnabled,
225  const armnn::DataLayout layout)
226 {
227  using namespace armnn;
228 
229  constexpr unsigned int batches = 1u;
230  constexpr unsigned int channels = 1u;
231 
232  constexpr unsigned int wInput = 3u;
233  constexpr unsigned int hInput = wInput;
234 
235  constexpr unsigned int wOutput = 5u;
236  constexpr unsigned int hOutput = wOutput;
237 
238  constexpr unsigned int wWeights = 3u;
239  constexpr unsigned int hWeights = wWeights;
240 
241  TensorShape inputShape = { batches, channels, hInput, wInput };
242  TensorShape outputShape = { batches, channels, hOutput, wOutput };
243  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
244 
245  TensorInfo inputInfo(inputShape, ArmnnType);
246  TensorInfo outputInfo(outputShape, ArmnnType);
247  TensorInfo weightsInfo(weightsShape, ArmnnType);
248  TensorInfo biasesInfo({ channels }, ArmnnBType);
249 
250  std::vector<float> inputData =
251  {
252  1.f, 1.f, 1.f,
253  1.f, 1.f, 1.f,
254  1.f, 1.f, 1.f
255  };
256 
257  std::vector<float> weightsData =
258  {
259  1.f, 2.f, 3.f,
260  4.f, 5.f, 6.f,
261  7.f, 8.f, 9.f
262  };
263 
264  std::vector<float> biasesData = { 1.f };
265 
266  std::vector<float> expectedOutputData =
267  {
268  1.f, 3.f, 6.f, 5.f, 3.f,
269  5.f, 12.f, 21.f, 16.f, 9.f,
270  12.f, 27.f, 45.f, 33.f, 18.f,
271  11.f, 24.f, 39.f, 28.f, 15.f,
272  7.f, 15.f, 24.f, 17.f, 9.f
273  };
274 
275  if (biasEnabled)
276  {
277  // apply bias to expected output data
278  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
279  [&](float f) -> float { return f + biasesData[0]; });
280  }
281 
283  descriptor.m_StrideX = 1;
284  descriptor.m_StrideY = 1;
285  descriptor.m_BiasEnabled = biasEnabled;
286  descriptor.m_DataLayout = layout;
287 
288  // swizzle data if needed
289  if (layout == armnn::DataLayout::NHWC)
290  {
291  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
292  }
293 
294  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
295  memoryManager,
296  tensorHandleFactory,
297  descriptor,
298  inputInfo,
299  inputData,
300  outputInfo,
301  expectedOutputData,
302  weightsInfo,
303  weightsData,
304  biasesInfo,
305  biasesData);
306 }
307 
308 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
310  armnn::IWorkloadFactory& workloadFactory,
312  const armnn::ITensorHandleFactory& tensorHandleFactory,
313  bool biasEnabled,
314  const armnn::DataLayout layout)
315 {
316  using namespace armnn;
317 
318  constexpr unsigned int batches = 1u;
319  constexpr unsigned int channels = 1u;
320 
321  constexpr unsigned int wInput = 4u;
322  constexpr unsigned int hInput = wInput;
323 
324  constexpr unsigned int wOutput = 2u;
325  constexpr unsigned int hOutput = wOutput;
326 
327  constexpr unsigned int wWeights = 3u;
328  constexpr unsigned int hWeights = wWeights;
329 
330  TensorShape inputShape = { batches, channels, hInput, wInput };
331  TensorShape outputShape = { batches, channels, hOutput, wOutput };
332  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
333 
334  TensorInfo inputInfo(inputShape, ArmnnType);
335  TensorInfo outputInfo(outputShape, ArmnnType);
336  TensorInfo weightsInfo(weightsShape, ArmnnType);
337  TensorInfo biasesInfo({ channels }, ArmnnBType);
338 
339  std::vector<float> inputData =
340  {
341  1.f, 3.f, 2.f, 1.f,
342  1.f, 3.f, 3.f, 1.f,
343  2.f, 1.f, 1.f, 3.f,
344  3.f, 2.f, 3.f, 3.f
345  };
346 
347  std::vector<float> weightsData =
348  {
349  1.f, 2.f, 3.f,
350  0.f, 1.f, 0.f,
351  2.f, 1.f, 2.f
352  };
353 
354  std::vector<float> biasesData = { 1.f };
355 
356  std::vector<float> expectedOutputData =
357  {
358  21.f, 21.f,
359  28.f, 27.f
360  };
361 
362  if (biasEnabled)
363  {
364  // apply bias to expected output data
365  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
366  [&](float f) -> float { return f + biasesData[0]; });
367  }
368 
370  descriptor.m_PadLeft = 2;
371  descriptor.m_PadRight = 2;
372  descriptor.m_PadTop = 2;
373  descriptor.m_PadBottom = 2;
374  descriptor.m_StrideX = 1;
375  descriptor.m_StrideY = 1;
376  descriptor.m_BiasEnabled = biasEnabled;
377  descriptor.m_DataLayout = layout;
378 
379  // swizzle data if needed
380  if (layout == armnn::DataLayout::NHWC)
381  {
382  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
383  }
384 
385  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
386  memoryManager,
387  tensorHandleFactory,
388  descriptor,
389  inputInfo,
390  inputData,
391  outputInfo,
392  expectedOutputData,
393  weightsInfo,
394  weightsData,
395  biasesInfo,
396  biasesData);
397 }
398 
399 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
401  armnn::IWorkloadFactory& workloadFactory,
403  const armnn::ITensorHandleFactory& tensorHandleFactory,
404  bool biasEnabled,
405  const armnn::DataLayout layout)
406 {
407  using namespace armnn;
408 
409  constexpr unsigned int batches = 1u;
410  constexpr unsigned int channels = 1u;
411 
412  constexpr unsigned int wInput = 3u;
413  constexpr unsigned int hInput = wInput;
414 
415  constexpr unsigned int wOutput = 7u;
416  constexpr unsigned int hOutput = wOutput;
417 
418  constexpr unsigned int wWeights = 3u;
419  constexpr unsigned int hWeights = wWeights;
420 
421  TensorShape inputShape = { batches, channels, hInput, wInput };
422  TensorShape outputShape = { batches, channels, hOutput, wOutput };
423  TensorShape weightsShape = { batches, channels, hWeights, wWeights };
424 
425  TensorInfo inputInfo(inputShape, ArmnnType);
426  TensorInfo outputInfo(outputShape, ArmnnType);
427  TensorInfo weightsInfo(weightsShape, ArmnnType);
428  TensorInfo biasesInfo({ channels }, ArmnnBType);
429 
430  std::vector<float> inputData =
431  {
432  1.f, 1.f, 1.f,
433  1.f, 1.f, 1.f,
434  1.f, 1.f, 1.f
435  };
436 
437  std::vector<float> weightsData =
438  {
439  1.f, 2.f, 3.f,
440  4.f, 5.f, 6.f,
441  7.f, 8.f, 9.f
442  };
443 
444  std::vector<float> biasesData = { 1.f };
445 
446  std::vector<float> expectedOutputData =
447  {
448  1.f, 2.f, 4.f, 2.f, 4.f, 2.f, 3.f,
449  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
450  8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
451  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
452  8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
453  4.f, 5.f, 10.f, 5.f, 10.f, 5.f, 6.f,
454  7.f, 8.f, 16.f, 8.f, 16.f, 8.f, 9.f
455  };
456 
457  if (biasEnabled)
458  {
459  // apply bias to expected output data
460  std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
461  [&](float f) -> float { return f + biasesData[0]; });
462  }
463 
465  descriptor.m_StrideX = 2;
466  descriptor.m_StrideY = 2;
467  descriptor.m_BiasEnabled = biasEnabled;
468  descriptor.m_DataLayout = layout;
469 
470  // swizzle data if needed
471  if (layout == armnn::DataLayout::NHWC)
472  {
473  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
474  }
475 
476  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
477  memoryManager,
478  tensorHandleFactory,
479  descriptor,
480  inputInfo,
481  inputData,
482  outputInfo,
483  expectedOutputData,
484  weightsInfo,
485  weightsData,
486  biasesInfo,
487  biasesData);
488 }
489 
490 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
492  armnn::IWorkloadFactory& workloadFactory,
494  const armnn::ITensorHandleFactory& tensorHandleFactory,
495  const armnn::DataLayout layout)
496 {
497  using namespace armnn;
498 
499  TensorShape inputShape = { 1, 1, 2, 2 };
500  TensorShape outputShape = { 1, 2, 5, 5 };
501 
502  // OIHW for NCHW; OHWI for NHWC
503  TensorShape weightsShape = { 2, 1, 3, 3 };
504  TensorShape biasesShape = { 2 };
505 
506  TensorInfo inputInfo(inputShape, ArmnnType);
507  TensorInfo outputInfo(outputShape, ArmnnType);
508  TensorInfo weightsInfo(weightsShape, ArmnnType);
509  TensorInfo biasesInfo(biasesShape, ArmnnBType);
510 
511  std::vector<float> inputData =
512  {
513  1.f, 2.f,
514  3.f, 4.f,
515  };
516 
517  std::vector<float> weightsData =
518  {
519  1.f, 3.f, 5.f,
520  7.f, 9.f, 11.f,
521  13.f, 15.f, 17.f,
522 
523  2.f, 4.f, 6.f,
524  8.f, 10.f, 12.f,
525  14.f, 16.f, 18.f
526  };
527 
528  std::vector<float> biasesData = { -1.5f, -2.0f };
529 
530  std::vector<float> expectedOutputData =
531  {
532  -0.5f, 1.5f, 5.5f, 4.5f, 8.5f,
533  5.5f, 7.5f, 23.5f, 16.5f, 20.5f,
534  14.5f, 22.5f, 60.5f, 40.5f, 52.5f,
535  19.5f, 25.5f, 59.5f, 34.5f, 42.5f,
536  37.5f, 43.5f, 101.5f, 58.5f, 66.5f,
537 
538  0.0f, 2.0f, 8.0f, 6.0f, 10.0f,
539  6.0f, 8.0f, 26.0f, 18.0f, 22.0f,
540  18.0f, 26.0f, 70.0f, 46.0f, 58.0f,
541  22.0f, 28.0f, 66.0f, 38.0f, 46.0f,
542  40.0f, 46.0f, 108.0f, 62.0f, 70.0f
543  };
544 
546  descriptor.m_StrideX = 2;
547  descriptor.m_StrideY = 2;
548  descriptor.m_BiasEnabled = true;
549  descriptor.m_DataLayout = layout;
550 
551  // swizzle data if needed
552  if (layout == armnn::DataLayout::NHWC)
553  {
554  SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
555  }
556 
557  return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
558  memoryManager,
559  tensorHandleFactory,
560  descriptor,
561  inputInfo,
562  inputData,
563  outputInfo,
564  expectedOutputData,
565  weightsInfo,
566  weightsData,
567  biasesInfo,
568  biasesData);
569 }
570 
572  armnn::IWorkloadFactory& workloadFactory,
574  const armnn::ITensorHandleFactory& tensorHandleFactory,
575  const armnn::DataLayout layout)
576 {
577  using namespace armnn;
578 
579  const DataType inputType = DataType::QAsymmU8;
580  const DataType kernelType = DataType::QSymmS8;
581  const DataType biasType = DataType::Signed32;
582 
583  TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
584  TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);
585 
586  const std::vector<float> quantScales{ 0.25f, 0.5f };
587  constexpr unsigned int quantDimension = 0;
588 
589  TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);
590 
591  const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
592  TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);
593 
594  std::vector<uint8_t> inputData =
595  {
596  12, 14,
597  16, 18
598  };
599 
600  std::vector<int8_t> kernelData =
601  {
602  4, 12, 20,
603  28, 36, 44,
604  52, 60, 68,
605 
606  4, 8, 12,
607  16, 20, 24,
608  28, 32, 36
609  };
610 
611  std::vector<int32_t> biasData = { -12, -8 };
612 
613  std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
614 
615  std::vector<uint8_t> expectedOutputData =
616  {
617  9, 13, 21, 19, 27,
618  21, 25, 57, 43, 51,
619  39, 55, 131, 91, 115,
620  49, 61, 129, 79, 95,
621  85, 97, 213, 127, 143,
622 
623  10, 14, 26, 22, 30,
624  22, 26, 62, 46, 54,
625  46, 62, 150, 102, 126,
626  54, 66, 142, 86, 102,
627  90, 102, 226, 134, 150
628  };
629 
630  if (layout == DataLayout::NHWC)
631  {
632  PermuteTensorNchwToNhwc(inputInfo, inputData);
633  PermuteTensorNchwToNhwc(kernelInfo, kernelData);
634  PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
635  }
636 
638  descriptor.m_StrideX = 2;
639  descriptor.m_StrideY = 2;
640  descriptor.m_BiasEnabled = true;
641  descriptor.m_DataLayout = layout;
642 
643  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
644  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
645 
646  WorkloadInfo workloadInfo;
647  ScopedTensorHandle weightTensor(kernelInfo);
648  ScopedTensorHandle biasTensor(biasInfo);
649 
650  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
651  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
652 
654  queueDescriptor.m_Parameters = descriptor;
655  queueDescriptor.m_Weight = &weightTensor;
656  queueDescriptor.m_Bias = &biasTensor;
657 
658  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
659  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
660 
661  std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
662  inputHandle->Allocate();
663  outputHandle->Allocate();
664 
665  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
666 
667  ExecuteWorkload(*workload, memoryManager);
668 
669  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
670 
671  return LayerTestResult<uint8_t, 4>(actualOutput,
672  expectedOutputData,
673  outputHandle->GetShape(),
674  outputInfo.GetShape());
675 }
676 
677 //
678 // Explicit template specializations
679 //
680 
682 SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
683  armnn::IWorkloadFactory& workloadFactory,
685  const armnn::ITensorHandleFactory& tensorHandleFactory,
686  bool biasEnabled,
687  const armnn::DataLayout layout);
688 
690 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
691  armnn::IWorkloadFactory& workloadFactory,
693  const armnn::ITensorHandleFactory& tensorHandleFactory,
694  bool biasEnabled,
695  const armnn::DataLayout layout);
696 
698 SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
699  armnn::IWorkloadFactory& workloadFactory,
701  const armnn::ITensorHandleFactory& tensorHandleFactory,
702  bool biasEnabled,
703  const armnn::DataLayout layout);
704 
706 SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
707  armnn::IWorkloadFactory& workloadFactory,
709  const armnn::ITensorHandleFactory& tensorHandleFactory,
710  bool biasEnabled,
711  const armnn::DataLayout layout);
712 
713 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
714 PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
715  armnn::IWorkloadFactory& workloadFactory,
717  const armnn::ITensorHandleFactory& tensorHandleFactory,
718  bool biasEnabled,
719  const armnn::DataLayout layout);
720 
721 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
722 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
723  armnn::IWorkloadFactory& workloadFactory,
725  const armnn::ITensorHandleFactory& tensorHandleFactory,
726  bool biasEnabled,
727  const armnn::DataLayout layout);
728 
729 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
730 PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
731  armnn::IWorkloadFactory& workloadFactory,
733  const armnn::ITensorHandleFactory& tensorHandleFactory,
734  bool biasEnabled,
735  const armnn::DataLayout layout);
736 
737 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
738 PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
739  armnn::IWorkloadFactory& workloadFactory,
741  const armnn::ITensorHandleFactory& tensorHandleFactory,
742  bool biasEnabled,
743  const armnn::DataLayout layout);
744 
745 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
746 StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
747  armnn::IWorkloadFactory& workloadFactory,
749  const armnn::ITensorHandleFactory& tensorHandleFactory,
750  bool biasEnabled,
751  const armnn::DataLayout layout);
752 
753 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
754 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
755  armnn::IWorkloadFactory& workloadFactory,
757  const armnn::ITensorHandleFactory& tensorHandleFactory,
758  bool biasEnabled,
759  const armnn::DataLayout layout);
760 
761 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
762 StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
763  armnn::IWorkloadFactory& workloadFactory,
765  const armnn::ITensorHandleFactory& tensorHandleFactory,
766  bool biasEnabled,
767  const armnn::DataLayout layout);
768 
769 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
770 StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
771  armnn::IWorkloadFactory& workloadFactory,
773  const armnn::ITensorHandleFactory& tensorHandleFactory,
774  bool biasEnabled,
775  const armnn::DataLayout layout);
776 
777 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
778 MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
779  armnn::IWorkloadFactory& workloadFactory,
781  const armnn::ITensorHandleFactory& tensorHandleFactory,
782  const armnn::DataLayout layout);
783 
784 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
785 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
786  armnn::IWorkloadFactory& workloadFactory,
788  const armnn::ITensorHandleFactory& tensorHandleFactory,
789  const armnn::DataLayout layout);
790 
791 template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
792 MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
793  armnn::IWorkloadFactory& workloadFactory,
795  const armnn::ITensorHandleFactory& tensorHandleFactory,
796  const armnn::DataLayout layout);
797 
798 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
799 MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
800  armnn::IWorkloadFactory& workloadFactory,
802  const armnn::ITensorHandleFactory& tensorHandleFactory,
803  const armnn::DataLayout layout);
DataLayout
Definition: Types.hpp:49
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
bool m_BiasEnabled
Enable/disable bias.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
LayerTestResult< T, 4 > SimpleTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > MultiChannelTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void PermuteTensorNchwToNhwc(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
DataType
Definition: Types.hpp:35
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
LayerTestResult< T, 4 > StridedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > TransposeConvolution2dPerAxisQuantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< T, 4 > PaddedTransposeConvolution2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)