ArmNN 20.02
PermuteTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <QuantizeHelper.hpp>

#include <backendsCommon/test/LayerTestResult.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

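// Shared implementation used by the permute tests below: builds a Permute workload
// from the supplied descriptor, runs it through the given workload factory, and
// returns the actual and expected outputs in a LayerTestResult for comparison.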
template<typename T>
LayerTestResult<T, 4> SimplePermuteTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        armnn::PermuteDescriptor descriptor,
        armnn::TensorInfo inputTensorInfo,
        armnn::TensorInfo outputTensorInfo,
        const std::vector<T>& inputData,
        const std::vector<T>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor data;
    data.m_Parameters = descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

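// Permutes a 1x2x2x2 tensor using mapping {0, 3, 1, 2}: source dimension n is moved
// to destination dimension m_DimMappings[n], so the shape is unchanged here because
// every permuted extent is 2, but the element order changes.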
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePermuteTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 2,
            3, 4,
            5, 6,
            7, 8
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 5, 2, 6,
            3, 7, 4, 8
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

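// Permutes a 1x2x2x3 tensor to 1x3x2x2 using mapping {0, 2, 3, 1}, moving the last
// source dimension into the second destination position (an NHWC-to-NCHW style reorder).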
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet1Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 2, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 2 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

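// The inverse of PermuteValueSet1Test: a 1x3x2x2 tensor is permuted back to 1x2x2x3
// using mapping {0, 3, 1, 2}, so the input and expected data of the two tests are swapped.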
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 3, 2, 2 };
    unsigned int outputShape[] = { 1, 2, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31,
            2, 12, 22, 32,
            3, 13, 23, 33
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33,
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}

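// Applies the same {0, 2, 3, 1} mapping as PermuteValueSet1Test to a larger 1x2x3x3
// tensor, permuting it to 1x3x2x3.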
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> PermuteValueSet3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 1, 2, 3, 3 };
    unsigned int outputShape[] = { 1, 3, 2, 3 };

    armnn::PermuteDescriptor descriptor;
    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    float qScale = 0.5f;
    int32_t qOffset = 5;
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
             1,  2,  3,
            11, 12, 13,
            21, 22, 23,
            31, 32, 33,
            41, 42, 43,
            51, 52, 53
        },
        qScale, qOffset);

    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
        {
            1, 11, 21, 31, 41, 51,
            2, 12, 22, 32, 42, 52,
            3, 13, 23, 33, 43, 53
        },
        qScale, qOffset);

    return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
                                    descriptor, inputTensorInfo,
                                    outputTensorInfo, input, outputExpected);
}
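
// Usage sketch (illustrative, not part of this header): a backend's layer-test suite
// typically registers these helpers against its own workload factory, for example via
// the ARMNN_AUTO_TEST_CASE macro from the backendsCommon test framework. The test
// names below are hypothetical:
//
//     ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32,    SimplePermuteTest<armnn::DataType::Float32>)
//     ARMNN_AUTO_TEST_CASE(PermuteValueSet1QAsymm8, PermuteValueSet1Test<armnn::DataType::QAsymmU8>)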