ArmNN 20.08: TransposeConvolution2dTestImpl.cpp
//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/DataLayoutUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

namespace
{

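// Pairs a tensor's shape/type metadata (armnn::TensorInfo) with its backing data.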
template<typename T>
using TensorData = std::pair<armnn::TensorInfo, std::vector<T>>;

template<typename T>
void VerifyInputTensorData(const TensorData<T>& data, const std::string& tensorName)
{
    if (data.first.GetNumElements() > data.second.size())
    {
        throw armnn::InvalidArgumentException("Size of data too small for " + tensorName + ": expected " +
            std::to_string(data.first.GetNumElements()) + " but got " + std::to_string(data.second.size()));
    }
}

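// Workhorse shared by all the tests below: builds a TransposeConvolution2d
// workload from the given descriptor and tensor data, runs it on the given
// workload factory, and copies the result back into 'output'.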
template<typename T, typename BT>
void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                    const armnn::TransposeConvolution2dDescriptor& descriptor,
                                    const TensorData<T>& input,
                                    TensorData<T>& output,
                                    const TensorData<T>& weights,
                                    const armnn::Optional<TensorData<BT>>& biases)
{
    IgnoreUnused(memoryManager);
    using namespace armnn;

    VerifyInputTensorData(input, "input");
    VerifyInputTensorData(weights, "weights");

    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            throw InvalidArgumentException("Bias enabled but no bias data provided");
        }
        VerifyInputTensorData(biases.value(), "biases");
    }

    // set up weights
    ScopedCpuTensorHandle weightsTensor(weights.first);

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightsTensor;

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.second.data());

    std::unique_ptr<ScopedCpuTensorHandle> biasesTensor;
    if (descriptor.m_BiasEnabled)
    {
        // set up biases
        biasesTensor = std::make_unique<ScopedCpuTensorHandle>(biases.value().first);
        queueDescriptor.m_Bias = biasesTensor.get();

        AllocateAndCopyDataToITensorHandle(biasesTensor.get(), biases.value().second.data());
    }

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    // set up input and output handles
    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(input.first);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(output.first);
    ARMNN_NO_DEPRECATE_WARN_END

    // set up workload
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, input.first, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.second.data());

    ExecuteWorkload(*workload, memoryManager);

    // copy output
    output.second = std::vector<T>(output.first.GetNumElements(), T());
    CopyDataFromITensorHandle(output.second.data(), outputHandle.get());
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TransposeConvolution2dDescriptor& descriptor,
    armnn::TensorInfo& inputInfo,
    const std::vector<float>& inputData,
    armnn::TensorInfo& outputInfo,
    const std::vector<float>& expectedOutputData,
    armnn::TensorInfo& weightsInfo,
    const std::vector<float>& weightsData,
    armnn::TensorInfo& biasesInfo,
    const std::vector<float>& biasesData)
{
    using namespace armnn;

    // set up quantization parameters
    if (armnn::IsQuantizedType<T>())
    {
        constexpr float   qScale  = 0.50f;
        constexpr int32_t qOffset = 10;

        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);

        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);

        weightsInfo.SetQuantizationScale(qScale);
        weightsInfo.SetQuantizationOffset(qOffset);

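        // By convention the bias scale is the product of the input and weight
        // scales (both qScale here), with a zero quantization offset.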
        biasesInfo.SetQuantizationScale(qScale * qScale);
        biasesInfo.SetQuantizationOffset(0);
    }

    // set up input
    TensorData<T> input =
    {
        inputInfo,
        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
    };

    // set up weights
    TensorData<T> weights =
    {
        weightsInfo,
        armnnUtils::QuantizedVector<T>(weightsData,
                                       weightsInfo.GetQuantizationScale(),
                                       weightsInfo.GetQuantizationOffset())
    };

    // set up biases
    using BT = ResolveType<ArmnnBType>;
    Optional<TensorData<BT>> optionalBiases;
    if (descriptor.m_BiasEnabled)
    {
        TensorData<BT> biases =
        {
            biasesInfo,
            armnnUtils::QuantizedVector<BT>(biasesData,
                                            biasesInfo.GetQuantizationScale(),
                                            biasesInfo.GetQuantizationOffset())
        };

        optionalBiases = Optional<TensorData<BT>>(biases);
    }

    // set up output
    TensorData<T> output = { outputInfo, {} };

    // execute test
    TransposeConvolution2dTestImpl(workloadFactory,
                                   memoryManager,
                                   descriptor,
                                   input,
                                   output,
                                   weights,
                                   optionalBiases);

    // construct result object
    LayerTestResult<T, 4> testResult(outputInfo);
    testResult.output         = MakeTensor<T, 4>(outputInfo, output.second);
    testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
                                                                                outputInfo.GetQuantizationScale(),
                                                                                outputInfo.GetQuantizationOffset()));

    return testResult;
}

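// Converts the input, output, and weights test data from NCHW to NHWC in place.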
template<typename T>
void SwizzleData(armnn::TensorInfo& inputInfo,
                 std::vector<T>& inputData,
                 armnn::TensorInfo& outputInfo,
                 std::vector<T>& outputData,
                 armnn::TensorInfo& weightsInfo,
                 std::vector<T>& weightsData)
{
    PermuteTensorNchwToNhwc<T>(inputInfo, inputData);
    PermuteTensorNchwToNhwc<T>(outputInfo, outputData);
    PermuteTensorNchwToNhwc<T>(weightsInfo, weightsData);
}

} // anonymous namespace

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> SimpleTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 5u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
         1.f,  3.f,  6.f,  5.f,  3.f,
         5.f, 12.f, 21.f, 16.f,  9.f,
        12.f, 27.f, 45.f, 33.f, 18.f,
        11.f, 24.f, 39.f, 28.f, 15.f,
         7.f, 15.f, 24.f, 17.f,  9.f
    };
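    // Sanity check for one value: with stride 1 and no padding each input
    // element stamps the full 3x3 kernel into the output, so the centre output
    // element receives a contribution from every input element and equals the
    // sum of all kernel values, 1 + 2 + ... + 9 = 45.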

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> PaddedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 4u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 2u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);
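    // For transpose convolution, padding crops the output rather than growing
    // the input: outputSize = stride * (inputSize - 1) + kernelSize - padLeft - padRight.
    // Here that is 1 * (4 - 1) + 3 - 2 - 2 = 2, matching the 2x2 output shape above.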

    std::vector<float> inputData =
    {
        1.f, 3.f, 2.f, 1.f,
        1.f, 3.f, 3.f, 1.f,
        2.f, 1.f, 1.f, 3.f,
        3.f, 2.f, 3.f, 3.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        0.f, 1.f, 0.f,
        2.f, 1.f, 2.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        21.f, 21.f,
        28.f, 27.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft     = 2;
    descriptor.m_PadRight    = 2;
    descriptor.m_PadTop      = 2;
    descriptor.m_PadBottom   = 2;
    descriptor.m_StrideX     = 1;
    descriptor.m_StrideY     = 1;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> StridedTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    constexpr unsigned int batches  = 1u;
    constexpr unsigned int channels = 1u;

    constexpr unsigned int wInput = 3u;
    constexpr unsigned int hInput = wInput;

    constexpr unsigned int wOutput = 7u;
    constexpr unsigned int hOutput = wOutput;

    constexpr unsigned int wWeights = 3u;
    constexpr unsigned int hWeights = wWeights;

    TensorShape inputShape   = { batches, channels, hInput, wInput };
    TensorShape outputShape  = { batches, channels, hOutput, wOutput };
    TensorShape weightsShape = { batches, channels, hWeights, wWeights };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo({ channels }, ArmnnBType);
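    // With stride 2 and no padding: outputSize = 2 * (3 - 1) + 3 = 7, hence the 7x7 output.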

    std::vector<float> inputData =
    {
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f,
        1.f, 1.f, 1.f
    };

    std::vector<float> weightsData =
    {
        1.f, 2.f, 3.f,
        4.f, 5.f, 6.f,
        7.f, 8.f, 9.f
    };

    std::vector<float> biasesData = { 1.f };

    std::vector<float> expectedOutputData =
    {
        1.f,  2.f,  4.f,  2.f,  4.f,  2.f,  3.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        8.f, 10.f, 20.f, 10.f, 20.f, 10.f, 12.f,
        4.f,  5.f, 10.f,  5.f, 10.f,  5.f,  6.f,
        7.f,  8.f, 16.f,  8.f, 16.f,  8.f,  9.f
    };

    if (biasEnabled)
    {
        // apply bias to expected output data
        std::transform(expectedOutputData.begin(), expectedOutputData.end(), expectedOutputData.begin(),
                       [&](float f) -> float { return f + biasesData[0]; });
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> MultiChannelTransposeConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    TensorShape inputShape  = { 1, 1, 2, 2 };
    TensorShape outputShape = { 1, 2, 5, 5 };

    // OIHW for NCHW; OHWI for NHWC
    TensorShape weightsShape = { 2, 1, 3, 3 };
    TensorShape biasesShape  = { 2 };

    TensorInfo inputInfo(inputShape, ArmnnType);
    TensorInfo outputInfo(outputShape, ArmnnType);
    TensorInfo weightsInfo(weightsShape, ArmnnType);
    TensorInfo biasesInfo(biasesShape, ArmnnBType);

    std::vector<float> inputData =
    {
        1.f, 2.f,
        3.f, 4.f,
    };

    std::vector<float> weightsData =
    {
        // output channel 0
         1.f,  3.f,  5.f,
         7.f,  9.f, 11.f,
        13.f, 15.f, 17.f,

        // output channel 1
         2.f,  4.f,  6.f,
         8.f, 10.f, 12.f,
        14.f, 16.f, 18.f
    };

    std::vector<float> biasesData = { -1.5f, -2.0f };

    std::vector<float> expectedOutputData =
    {
        // output channel 0
        -0.5f,  1.5f,   5.5f,  4.5f,  8.5f,
         5.5f,  7.5f,  23.5f, 16.5f, 20.5f,
        14.5f, 22.5f,  60.5f, 40.5f, 52.5f,
        19.5f, 25.5f,  59.5f, 34.5f, 42.5f,
        37.5f, 43.5f, 101.5f, 58.5f, 66.5f,

        // output channel 1
         0.0f,  2.0f,   8.0f,  6.0f, 10.0f,
         6.0f,  8.0f,  26.0f, 18.0f, 22.0f,
        18.0f, 26.0f,  70.0f, 46.0f, 58.0f,
        22.0f, 28.0f,  66.0f, 38.0f, 46.0f,
        40.0f, 46.0f, 108.0f, 62.0f, 70.0f
    };

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    // swizzle data if needed
    if (layout == armnn::DataLayout::NHWC)
    {
        SwizzleData(inputInfo, inputData, outputInfo, expectedOutputData, weightsInfo, weightsData);
    }

    return TransposeConvolution2dTest<ArmnnType, ArmnnBType>(workloadFactory,
                                                             memoryManager,
                                                             descriptor,
                                                             inputInfo,
                                                             inputData,
                                                             outputInfo,
                                                             expectedOutputData,
                                                             weightsInfo,
                                                             weightsData,
                                                             biasesInfo,
                                                             biasesData);
}

LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    TensorInfo inputInfo ({ 1, 1, 2, 2 }, inputType, 0.50f, 10);
    TensorInfo outputInfo({ 1, 2, 5, 5 }, inputType, 0.50f, 10);

    // per-axis quantization: one scale per output channel (axis 0 of the OIHW weights)
    const std::vector<float> quantScales{ 0.25f, 0.5f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 2, 1, 3, 3 }, kernelType, quantScales, quantDimension);

    // per-channel bias scale = input scale * kernel scale (0.5 * 0.25 = 0.125, 0.5 * 0.5 = 0.25)
    const std::vector<float> biasQuantScales{ 0.125f, 0.25f };
    TensorInfo biasInfo({ 2 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        12, 14,
        16, 18
    };

    std::vector<int8_t> kernelData =
    {
        // output channel 0
         4, 12, 20,
        28, 36, 44,
        52, 60, 68,

        // output channel 1
         4,  8, 12,
        16, 20, 24,
        28, 32, 36
    };

    std::vector<int32_t> biasData = { -12, -8 };

    std::vector<uint8_t> expectedOutputData =
    {
        // output channel 0
         9,  13,  21,  19,  27,
        21,  25,  57,  43,  51,
        39,  55, 131,  91, 115,
        49,  61, 129,  79,  95,
        85,  97, 213, 127, 143,

        // output channel 1
        10,  14,  26,  22,  30,
        22,  26,  62,  46,  54,
        46,  62, 150, 102, 126,
        54,  66, 142,  86, 102,
        90, 102, 226, 134, 150
    };

    if (layout == DataLayout::NHWC)
    {
        PermuteTensorNchwToNhwc(inputInfo, inputData);
        PermuteTensorNchwToNhwc(kernelInfo, kernelData);
        PermuteTensorNchwToNhwc(outputInfo, expectedOutputData);
    }

    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout  = layout;

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    WorkloadInfo workloadInfo;
    ScopedCpuTensorHandle weightTensor(kernelInfo);
    ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    TransposeConvolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight     = &weightTensor;
    queueDescriptor.m_Bias       = &biasTensor;

    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    LayerTestResult<uint8_t, 4> ret(outputInfo);
    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
    ret.outputExpected = MakeTensor<uint8_t, 4>(outputInfo, expectedOutputData);

    return ret;
}
666 
667 //
668 // Explicit template specializations
669 //
670 
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout);
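
// Usage sketch (not part of this file): backend test suites normally invoke
// these helpers through registration macros such as ARMNN_AUTO_TEST_CASE.
// Called directly against the reference backend, assuming RefWorkloadFactory
// is available and a null memory manager is acceptable, a test would look
// roughly like this:
//
//     armnn::RefWorkloadFactory workloadFactory;
//     LayerTestResult<float, 4> result =
//         SimpleTransposeConvolution2dTest<armnn::DataType::Float32,
//                                          armnn::DataType::Float32>(
//             workloadFactory, nullptr, /*biasEnabled=*/true, armnn::DataLayout::NCHW);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));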