ArmNN
 21.11
ArgMinMaxTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ArgMinMaxTestImpl.hpp"
7 
8 
12 
13 #include <test/TensorHelpers.hpp>
14 
namespace
{

/// Builds, runs, and validates a single ArgMin/ArgMax workload.
///
/// NOTE(review): this listing is missing the lines that declare `descriptor`
/// (armnn::ArgMinMaxQueueDescriptor) and `info` (armnn::WorkloadInfo), and the
/// `memoryManager` parameter that every caller below passes — they were lost
/// in extraction; restore from the upstream file before compiling.
///
/// @param workloadFactory     Backend factory used to create the workload.
/// @param tensorHandleFactory Factory used to create the input/output handles.
/// @param argMinMaxFunction   Whether the workload computes ArgMin or ArgMax.
/// @param inputTensorInfo     Shape/type of the input (passed by value).
/// @param outputTensorInfo    Shape of the Signed32 index output.
/// @param inputData           Input values as float; converted to ArmnnType.
/// @param outputData          Expected index values.
/// @param axis                Axis to reduce across (defaults to 3).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
 armnn::IWorkloadFactory& workloadFactory,
 const armnn::ITensorHandleFactory& tensorHandleFactory,
 armnn::ArgMinMaxFunction argMinMaxFunction,
 const armnn::TensorInfo inputTensorInfo,
 const armnn::TensorInfo outputTensorInfo,
 const std::vector<float>& inputData,
 const std::vector<int32_t>& outputData,
 int axis = 3)
{
 // Quantize/convert the float reference data to the tested data type.
 std::vector<T> inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
 std::vector<int32_t> actualOutput(outputTensorInfo.GetNumElements());

 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

 // Configure which reduction to run and along which axis.
 descriptor.m_Parameters.m_Function = argMinMaxFunction;
 descriptor.m_Parameters.m_Axis = axis;

 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);

 inputHandle->Allocate();
 outputHandle->Allocate();

 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

 workload->PostAllocationConfigure();
 workload->Execute();

 // Read back the computed indices for comparison against outputData.
 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

 return LayerTestResult<int32_t, 3>(actualOutput,
 outputData,
 outputHandle->GetShape(),
 outputTensorInfo.GetShape());
}

} // namespace
63 
64 template<armnn::DataType ArmnnType, typename T>
66  armnn::IWorkloadFactory& workloadFactory,
68  const armnn::ITensorHandleFactory& tensorHandleFactory)
69 {
70  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
71  const armnn::TensorShape outputShape{ 1, 1, 1 };
72 
73  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
74 
75  if (armnn::IsQuantizedType<T>())
76  {
77  inputTensorInfo.SetQuantizationScale(1.0f);
78  inputTensorInfo.SetQuantizationOffset(0);
79  }
80 
81  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
82 
83  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
84  std::vector<int32_t> outputValues({ 3 });
85 
86  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
88  inputTensorInfo, outputTensorInfo,
89  inputValues, outputValues, -1); // axis -1 === 3
90 }
91 
92 template<armnn::DataType ArmnnType, typename T>
94  armnn::IWorkloadFactory& workloadFactory,
96  const armnn::ITensorHandleFactory& tensorHandleFactory)
97 {
98  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
99  const armnn::TensorShape outputShape{ 1, 1, 1 };
100 
101  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
102 
103  if (armnn::IsQuantizedType<T>())
104  {
105  inputTensorInfo.SetQuantizationScale(1.0f);
106  inputTensorInfo.SetQuantizationOffset(0);
107  }
108 
109  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
110 
111  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
112  std::vector<int32_t> outputValues({ 1 });
113 
114  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
116  inputTensorInfo, outputTensorInfo,
117  inputValues, outputValues, 3);
118 }
119 
120 template<armnn::DataType ArmnnType, typename T>
122  armnn::IWorkloadFactory& workloadFactory,
124  const armnn::ITensorHandleFactory& tensorHandleFactory)
125 {
126  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
127  const armnn::TensorShape outputShape{ 1, 2, 4 };
128 
129  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
130 
131  if (armnn::IsQuantizedType<T>())
132  {
133  inputTensorInfo.SetQuantizationScale(1.0f);
134  inputTensorInfo.SetQuantizationOffset(0);
135  }
136 
137  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
138 
139  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
140  5.0f, 6.0f, 7.0f, 8.0f,
141 
142  10.0f, 20.0f, 30.0f, 40.0f,
143  50.0f, 60.0f, 70.0f, 80.0f,
144 
145  100.0f, 200.0f, 300.0f, 400.0f,
146  500.0f, 600.0f, 700.0f, 800.0f });
147  std::vector<int32_t> outputValues({ 0, 0, 0, 0,
148  0, 0, 0, 0 });
149 
150  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
152  inputTensorInfo, outputTensorInfo,
153  inputValues, outputValues, 1);
154 }
155 
156 template<armnn::DataType ArmnnType, typename T>
158  armnn::IWorkloadFactory& workloadFactory,
160  const armnn::ITensorHandleFactory& tensorHandleFactory)
161 {
162  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
163  const armnn::TensorShape outputShape{ 1, 2, 4 };
164 
165  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
166 
167  if (armnn::IsQuantizedType<T>())
168  {
169  inputTensorInfo.SetQuantizationScale(1.0f);
170  inputTensorInfo.SetQuantizationOffset(0);
171  }
172 
173  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
174 
175  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
176  5.0f, 6.0f, 7.0f, 8.0f,
177 
178  10.0f, 20.0f, 30.0f, 40.0f,
179  50.0f, 60.0f, 70.0f, 80.0f,
180 
181  100.0f, 200.0f, 300.0f, 400.0f,
182  500.0f, 600.0f, 700.0f, 800.0f });
183  std::vector<int32_t> outputValues({ 2, 2, 2, 2,
184  2, 2, 2, 2 });
185 
186  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
188  inputTensorInfo, outputTensorInfo,
189  inputValues, outputValues, 1);
190 }
191 
192 template<armnn::DataType ArmnnType, typename T>
194  armnn::IWorkloadFactory& workloadFactory,
196  const armnn::ITensorHandleFactory& tensorHandleFactory)
197 {
198  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
199  const armnn::TensorShape outputShape{ 1, 3, 4 };
200 
201  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
202  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
203 
204  if (armnn::IsQuantizedType<T>())
205  {
206  inputTensorInfo.SetQuantizationScale(1.0f);
207  inputTensorInfo.SetQuantizationOffset(0);
208  }
209 
210  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
211  5.0f, 6.0f, 7.0f, 8.0f,
212 
213  10.0f, 20.0f, 30.0f, 40.0f,
214  50.0f, 60.0f, 70.0f, 80.0f,
215 
216  100.0f, 200.0f, 300.0f, 400.0f,
217  500.0f, 600.0f, 700.0f, 800.0f });
218  std::vector<int32_t> outputValues({ 1, 1, 1, 1,
219  1, 1, 1, 1,
220  1, 1, 1, 1 });
221 
222  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
224  inputTensorInfo, outputTensorInfo,
225  inputValues, outputValues, 2);
226 }
227 
228 template<armnn::DataType ArmnnType, typename T>
230  armnn::IWorkloadFactory& workloadFactory,
232  const armnn::ITensorHandleFactory& tensorHandleFactory)
233 {
234  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
235  const armnn::TensorShape outputShape{ 1, 3, 2 };
236 
237  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
238  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
239 
240  if (armnn::IsQuantizedType<T>())
241  {
242  inputTensorInfo.SetQuantizationScale(1.0f);
243  inputTensorInfo.SetQuantizationOffset(0);
244  }
245 
246  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
247  5.0f, 6.0f, 7.0f, 8.0f,
248 
249  10.0f, 20.0f, 30.0f, 40.0f,
250  50.0f, 60.0f, 70.0f, 80.0f,
251 
252  100.0f, 200.0f, 300.0f, 400.0f,
253  500.0f, 600.0f, 700.0f, 800.0f });
254  std::vector<int32_t> outputValues({ 0, 0,
255  0, 0,
256  0, 0 });
257 
258  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
260  inputTensorInfo, outputTensorInfo,
261  inputValues, outputValues, 3);
262 }
263 
264 
265 // Explicit template specializations
266 
268 ArgMaxSimpleTest<armnn::DataType::Float32>(
269  armnn::IWorkloadFactory& workloadFactory,
271  const armnn::ITensorHandleFactory& tensorHandleFactory);
272 
274 ArgMaxSimpleTest<armnn::DataType::Float16>(
275  armnn::IWorkloadFactory& workloadFactory,
277  const armnn::ITensorHandleFactory& tensorHandleFactory);
278 
280 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
281  armnn::IWorkloadFactory& workloadFactory,
283  const armnn::ITensorHandleFactory& tensorHandleFactory);
284 
286 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
287  armnn::IWorkloadFactory& workloadFactory,
289  const armnn::ITensorHandleFactory& tensorHandleFactory);
290 
292 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
293  armnn::IWorkloadFactory& workloadFactory,
295  const armnn::ITensorHandleFactory& tensorHandleFactory);
296 
298 ArgMaxSimpleTest<armnn::DataType::Signed32>(
299  armnn::IWorkloadFactory& workloadFactory,
301  const armnn::ITensorHandleFactory& tensorHandleFactory);
302 
304 ArgMinSimpleTest<armnn::DataType::Float32>(
305  armnn::IWorkloadFactory& workloadFactory,
307  const armnn::ITensorHandleFactory& tensorHandleFactory);
308 
310 ArgMinSimpleTest<armnn::DataType::Float16>(
311  armnn::IWorkloadFactory& workloadFactory,
313  const armnn::ITensorHandleFactory& tensorHandleFactory);
314 
316 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
317  armnn::IWorkloadFactory& workloadFactory,
319  const armnn::ITensorHandleFactory& tensorHandleFactory);
320 
322 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
323  armnn::IWorkloadFactory& workloadFactory,
325  const armnn::ITensorHandleFactory& tensorHandleFactory);
326 
328 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
329  armnn::IWorkloadFactory& workloadFactory,
331  const armnn::ITensorHandleFactory& tensorHandleFactory);
332 
334 ArgMinSimpleTest<armnn::DataType::Signed32>(
335  armnn::IWorkloadFactory& workloadFactory,
337  const armnn::ITensorHandleFactory& tensorHandleFactory);
338 
340 ArgMinChannelTest<armnn::DataType::Float32>(
341  armnn::IWorkloadFactory& workloadFactory,
343  const armnn::ITensorHandleFactory& tensorHandleFactory);
344 
346 ArgMinChannelTest<armnn::DataType::Float16>(
347  armnn::IWorkloadFactory& workloadFactory,
349  const armnn::ITensorHandleFactory& tensorHandleFactory);
350 
352 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
353  armnn::IWorkloadFactory& workloadFactory,
355  const armnn::ITensorHandleFactory& tensorHandleFactory);
356 
358 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
359  armnn::IWorkloadFactory& workloadFactory,
361  const armnn::ITensorHandleFactory& tensorHandleFactory);
362 
364 ArgMinChannelTest<armnn::DataType::QSymmS16>(
365  armnn::IWorkloadFactory& workloadFactory,
367  const armnn::ITensorHandleFactory& tensorHandleFactory);
368 
370 ArgMinChannelTest<armnn::DataType::Signed32>(
371  armnn::IWorkloadFactory& workloadFactory,
373  const armnn::ITensorHandleFactory& tensorHandleFactory);
374 
376 ArgMaxChannelTest<armnn::DataType::Float32>(
377  armnn::IWorkloadFactory& workloadFactory,
379  const armnn::ITensorHandleFactory& tensorHandleFactory);
380 
382 ArgMaxChannelTest<armnn::DataType::Float16>(
383  armnn::IWorkloadFactory& workloadFactory,
385  const armnn::ITensorHandleFactory& tensorHandleFactory);
386 
388 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
389  armnn::IWorkloadFactory& workloadFactory,
391  const armnn::ITensorHandleFactory& tensorHandleFactory);
392 
394 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
395  armnn::IWorkloadFactory& workloadFactory,
397  const armnn::ITensorHandleFactory& tensorHandleFactory);
398 
400 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
401  armnn::IWorkloadFactory& workloadFactory,
403  const armnn::ITensorHandleFactory& tensorHandleFactory);
404 
406 ArgMaxChannelTest<armnn::DataType::Signed32>(
407  armnn::IWorkloadFactory& workloadFactory,
409  const armnn::ITensorHandleFactory& tensorHandleFactory);
410 
412 ArgMaxHeightTest<armnn::DataType::Float32>(
413  armnn::IWorkloadFactory& workloadFactory,
415  const armnn::ITensorHandleFactory& tensorHandleFactory);
416 
418 ArgMaxHeightTest<armnn::DataType::Float16>(
419  armnn::IWorkloadFactory& workloadFactory,
421  const armnn::ITensorHandleFactory& tensorHandleFactory);
422 
424 ArgMaxHeightTest<armnn::DataType::Signed32>(
425  armnn::IWorkloadFactory& workloadFactory,
427  const armnn::ITensorHandleFactory& tensorHandleFactory);
428 
430 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
431  armnn::IWorkloadFactory& workloadFactory,
433  const armnn::ITensorHandleFactory& tensorHandleFactory);
434 
436 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
437  armnn::IWorkloadFactory& workloadFactory,
439  const armnn::ITensorHandleFactory& tensorHandleFactory);
440 
442 ArgMinWidthTest<armnn::DataType::Float32>(
443  armnn::IWorkloadFactory& workloadFactory,
445  const armnn::ITensorHandleFactory& tensorHandleFactory);
446 
448 ArgMinWidthTest<armnn::DataType::Float16>(
449  armnn::IWorkloadFactory& workloadFactory,
451  const armnn::ITensorHandleFactory& tensorHandleFactory);
452 
454 ArgMinWidthTest<armnn::DataType::Signed32>(
455  armnn::IWorkloadFactory& workloadFactory,
457  const armnn::ITensorHandleFactory& tensorHandleFactory);
458 
460 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
461  armnn::IWorkloadFactory& workloadFactory,
463  const armnn::ITensorHandleFactory& tensorHandleFactory);
464 
466 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
467  armnn::IWorkloadFactory& workloadFactory,
469  const armnn::ITensorHandleFactory& tensorHandleFactory);
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:70
LayerTestResult< int32_t, 3 > ArgMaxHeightTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int32_t, 3 > ArgMinChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
ArgMinMaxFunction
Definition: Types.hpp:89
Contains information about TensorInfos of a layer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:72
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
LayerTestResult< int32_t, 3 > ArgMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< int32_t, 3 > ArgMinWidthTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int32_t, 3 > ArgMaxChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)