// ArmNN 20.11 — ArgMinMaxTestImpl.cpp
// (Reference test implementation; see the generated Doxygen documentation for this file.)
//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ArgMinMaxTestImpl.hpp"

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
15 namespace
16 {
17 
18 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
19 LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
20  armnn::IWorkloadFactory& workloadFactory,
22  const armnn::ITensorHandleFactory& tensorHandleFactory,
23  armnn::ArgMinMaxFunction argMinMaxFunction,
24  const armnn::TensorInfo inputTensorInfo,
25  const armnn::TensorInfo outputTensorInfo,
26  const std::vector<float>& inputData,
27  const std::vector<int32_t>& outputData,
28  int axis = 3)
29 {
30  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
31 
32  LayerTestResult<int32_t, 3> result(outputTensorInfo);
33  result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
34 
35  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
36  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
37 
39  descriptor.m_Parameters.m_Function = argMinMaxFunction;
40  descriptor.m_Parameters.m_Axis = axis;
42 
43  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
44  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
45 
46  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
47 
48  inputHandle->Allocate();
49  outputHandle->Allocate();
50 
51  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
52 
53  workload->PostAllocationConfigure();
54  workload->Execute();
55 
56  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
57 
58  return result;
59 }
60 
61 } // namespace
62 
63 template<armnn::DataType ArmnnType, typename T>
65  armnn::IWorkloadFactory& workloadFactory,
67  const armnn::ITensorHandleFactory& tensorHandleFactory)
68 {
69  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
70  const armnn::TensorShape outputShape{ 1, 1, 1 };
71 
72  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
73 
74  if (armnn::IsQuantizedType<T>())
75  {
76  inputTensorInfo.SetQuantizationScale(1.0f);
77  inputTensorInfo.SetQuantizationOffset(0);
78  }
79 
80  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
81 
82  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
83  std::vector<int32_t> outputValues({ 3 });
84 
85  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
87  inputTensorInfo, outputTensorInfo,
88  inputValues, outputValues, -1); // axis -1 === 3
89 }
90 
91 template<armnn::DataType ArmnnType, typename T>
93  armnn::IWorkloadFactory& workloadFactory,
95  const armnn::ITensorHandleFactory& tensorHandleFactory)
96 {
97  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
98  const armnn::TensorShape outputShape{ 1, 1, 1 };
99 
100  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
101 
102  if (armnn::IsQuantizedType<T>())
103  {
104  inputTensorInfo.SetQuantizationScale(1.0f);
105  inputTensorInfo.SetQuantizationOffset(0);
106  }
107 
108  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
109 
110  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
111  std::vector<int32_t> outputValues({ 1 });
112 
113  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
115  inputTensorInfo, outputTensorInfo,
116  inputValues, outputValues, 3);
117 }
118 
119 template<armnn::DataType ArmnnType, typename T>
121  armnn::IWorkloadFactory& workloadFactory,
123  const armnn::ITensorHandleFactory& tensorHandleFactory)
124 {
125  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
126  const armnn::TensorShape outputShape{ 1, 2, 4 };
127 
128  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
129 
130  if (armnn::IsQuantizedType<T>())
131  {
132  inputTensorInfo.SetQuantizationScale(1.0f);
133  inputTensorInfo.SetQuantizationOffset(0);
134  }
135 
136  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
137 
138  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
139  5.0f, 6.0f, 7.0f, 8.0f,
140 
141  10.0f, 20.0f, 30.0f, 40.0f,
142  50.0f, 60.0f, 70.0f, 80.0f,
143 
144  100.0f, 200.0f, 300.0f, 400.0f,
145  500.0f, 600.0f, 700.0f, 800.0f });
146  std::vector<int32_t> outputValues({ 0, 0, 0, 0,
147  0, 0, 0, 0 });
148 
149  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
151  inputTensorInfo, outputTensorInfo,
152  inputValues, outputValues, 1);
153 }
154 
155 template<armnn::DataType ArmnnType, typename T>
157  armnn::IWorkloadFactory& workloadFactory,
159  const armnn::ITensorHandleFactory& tensorHandleFactory)
160 {
161  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
162  const armnn::TensorShape outputShape{ 1, 2, 4 };
163 
164  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
165 
166  if (armnn::IsQuantizedType<T>())
167  {
168  inputTensorInfo.SetQuantizationScale(1.0f);
169  inputTensorInfo.SetQuantizationOffset(0);
170  }
171 
172  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
173 
174  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
175  5.0f, 6.0f, 7.0f, 8.0f,
176 
177  10.0f, 20.0f, 30.0f, 40.0f,
178  50.0f, 60.0f, 70.0f, 80.0f,
179 
180  100.0f, 200.0f, 300.0f, 400.0f,
181  500.0f, 600.0f, 700.0f, 800.0f });
182  std::vector<int32_t> outputValues({ 2, 2, 2, 2,
183  2, 2, 2, 2 });
184 
185  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
187  inputTensorInfo, outputTensorInfo,
188  inputValues, outputValues, 1);
189 }
190 
191 template<armnn::DataType ArmnnType, typename T>
193  armnn::IWorkloadFactory& workloadFactory,
195  const armnn::ITensorHandleFactory& tensorHandleFactory)
196 {
197  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
198  const armnn::TensorShape outputShape{ 1, 3, 4 };
199 
200  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
201  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
202 
203  if (armnn::IsQuantizedType<T>())
204  {
205  inputTensorInfo.SetQuantizationScale(1.0f);
206  inputTensorInfo.SetQuantizationOffset(0);
207  }
208 
209  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
210  5.0f, 6.0f, 7.0f, 8.0f,
211 
212  10.0f, 20.0f, 30.0f, 40.0f,
213  50.0f, 60.0f, 70.0f, 80.0f,
214 
215  100.0f, 200.0f, 300.0f, 400.0f,
216  500.0f, 600.0f, 700.0f, 800.0f });
217  std::vector<int32_t> outputValues({ 1, 1, 1, 1,
218  1, 1, 1, 1,
219  1, 1, 1, 1 });
220 
221  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
223  inputTensorInfo, outputTensorInfo,
224  inputValues, outputValues, 2);
225 }
226 
227 template<armnn::DataType ArmnnType, typename T>
229  armnn::IWorkloadFactory& workloadFactory,
231  const armnn::ITensorHandleFactory& tensorHandleFactory)
232 {
233  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
234  const armnn::TensorShape outputShape{ 1, 3, 2 };
235 
236  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
237  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
238 
239  if (armnn::IsQuantizedType<T>())
240  {
241  inputTensorInfo.SetQuantizationScale(1.0f);
242  inputTensorInfo.SetQuantizationOffset(0);
243  }
244 
245  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
246  5.0f, 6.0f, 7.0f, 8.0f,
247 
248  10.0f, 20.0f, 30.0f, 40.0f,
249  50.0f, 60.0f, 70.0f, 80.0f,
250 
251  100.0f, 200.0f, 300.0f, 400.0f,
252  500.0f, 600.0f, 700.0f, 800.0f });
253  std::vector<int32_t> outputValues({ 0, 0,
254  0, 0,
255  0, 0 });
256 
257  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager, tensorHandleFactory,
259  inputTensorInfo, outputTensorInfo,
260  inputValues, outputValues, 3);
261 }
262 

// Explicit template instantiations

267 ArgMaxSimpleTest<armnn::DataType::Float32>(
268  armnn::IWorkloadFactory& workloadFactory,
270  const armnn::ITensorHandleFactory& tensorHandleFactory);
271 
273 ArgMaxSimpleTest<armnn::DataType::Float16>(
274  armnn::IWorkloadFactory& workloadFactory,
276  const armnn::ITensorHandleFactory& tensorHandleFactory);
277 
279 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
280  armnn::IWorkloadFactory& workloadFactory,
282  const armnn::ITensorHandleFactory& tensorHandleFactory);
283 
285 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
286  armnn::IWorkloadFactory& workloadFactory,
288  const armnn::ITensorHandleFactory& tensorHandleFactory);
289 
291 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
292  armnn::IWorkloadFactory& workloadFactory,
294  const armnn::ITensorHandleFactory& tensorHandleFactory);
295 
297 ArgMaxSimpleTest<armnn::DataType::Signed32>(
298  armnn::IWorkloadFactory& workloadFactory,
300  const armnn::ITensorHandleFactory& tensorHandleFactory);
301 
303 ArgMinSimpleTest<armnn::DataType::Float32>(
304  armnn::IWorkloadFactory& workloadFactory,
306  const armnn::ITensorHandleFactory& tensorHandleFactory);
307 
309 ArgMinSimpleTest<armnn::DataType::Float16>(
310  armnn::IWorkloadFactory& workloadFactory,
312  const armnn::ITensorHandleFactory& tensorHandleFactory);
313 
315 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
316  armnn::IWorkloadFactory& workloadFactory,
318  const armnn::ITensorHandleFactory& tensorHandleFactory);
319 
321 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
322  armnn::IWorkloadFactory& workloadFactory,
324  const armnn::ITensorHandleFactory& tensorHandleFactory);
325 
327 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
328  armnn::IWorkloadFactory& workloadFactory,
330  const armnn::ITensorHandleFactory& tensorHandleFactory);
331 
333 ArgMinSimpleTest<armnn::DataType::Signed32>(
334  armnn::IWorkloadFactory& workloadFactory,
336  const armnn::ITensorHandleFactory& tensorHandleFactory);
337 
339 ArgMinChannelTest<armnn::DataType::Float32>(
340  armnn::IWorkloadFactory& workloadFactory,
342  const armnn::ITensorHandleFactory& tensorHandleFactory);
343 
345 ArgMinChannelTest<armnn::DataType::Float16>(
346  armnn::IWorkloadFactory& workloadFactory,
348  const armnn::ITensorHandleFactory& tensorHandleFactory);
349 
351 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
352  armnn::IWorkloadFactory& workloadFactory,
354  const armnn::ITensorHandleFactory& tensorHandleFactory);
355 
357 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
358  armnn::IWorkloadFactory& workloadFactory,
360  const armnn::ITensorHandleFactory& tensorHandleFactory);
361 
363 ArgMinChannelTest<armnn::DataType::QSymmS16>(
364  armnn::IWorkloadFactory& workloadFactory,
366  const armnn::ITensorHandleFactory& tensorHandleFactory);
367 
369 ArgMinChannelTest<armnn::DataType::Signed32>(
370  armnn::IWorkloadFactory& workloadFactory,
372  const armnn::ITensorHandleFactory& tensorHandleFactory);
373 
375 ArgMaxChannelTest<armnn::DataType::Float32>(
376  armnn::IWorkloadFactory& workloadFactory,
378  const armnn::ITensorHandleFactory& tensorHandleFactory);
379 
381 ArgMaxChannelTest<armnn::DataType::Float16>(
382  armnn::IWorkloadFactory& workloadFactory,
384  const armnn::ITensorHandleFactory& tensorHandleFactory);
385 
387 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
388  armnn::IWorkloadFactory& workloadFactory,
390  const armnn::ITensorHandleFactory& tensorHandleFactory);
391 
393 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
394  armnn::IWorkloadFactory& workloadFactory,
396  const armnn::ITensorHandleFactory& tensorHandleFactory);
397 
399 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
400  armnn::IWorkloadFactory& workloadFactory,
402  const armnn::ITensorHandleFactory& tensorHandleFactory);
403 
405 ArgMaxChannelTest<armnn::DataType::Signed32>(
406  armnn::IWorkloadFactory& workloadFactory,
408  const armnn::ITensorHandleFactory& tensorHandleFactory);
409 
411 ArgMaxHeightTest<armnn::DataType::Float32>(
412  armnn::IWorkloadFactory& workloadFactory,
414  const armnn::ITensorHandleFactory& tensorHandleFactory);
415 
417 ArgMaxHeightTest<armnn::DataType::Float16>(
418  armnn::IWorkloadFactory& workloadFactory,
420  const armnn::ITensorHandleFactory& tensorHandleFactory);
421 
423 ArgMaxHeightTest<armnn::DataType::Signed32>(
424  armnn::IWorkloadFactory& workloadFactory,
426  const armnn::ITensorHandleFactory& tensorHandleFactory);
427 
429 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
430  armnn::IWorkloadFactory& workloadFactory,
432  const armnn::ITensorHandleFactory& tensorHandleFactory);
433 
435 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
436  armnn::IWorkloadFactory& workloadFactory,
438  const armnn::ITensorHandleFactory& tensorHandleFactory);
439 
441 ArgMinWidthTest<armnn::DataType::Float32>(
442  armnn::IWorkloadFactory& workloadFactory,
444  const armnn::ITensorHandleFactory& tensorHandleFactory);
445 
447 ArgMinWidthTest<armnn::DataType::Float16>(
448  armnn::IWorkloadFactory& workloadFactory,
450  const armnn::ITensorHandleFactory& tensorHandleFactory);
451 
453 ArgMinWidthTest<armnn::DataType::Signed32>(
454  armnn::IWorkloadFactory& workloadFactory,
456  const armnn::ITensorHandleFactory& tensorHandleFactory);
457 
459 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
460  armnn::IWorkloadFactory& workloadFactory,
462  const armnn::ITensorHandleFactory& tensorHandleFactory);
463 
465 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
466  armnn::IWorkloadFactory& workloadFactory,
468  const armnn::ITensorHandleFactory& tensorHandleFactory);
// ---------------------------------------------------------------------------
// Doxygen cross-reference residue from the scraped page, retained as comments:
//
//  - ArgMaxSimpleTest / ArgMinSimpleTest / ArgMinChannelTest / ArgMaxChannelTest /
//    ArgMaxHeightTest / ArgMinWidthTest share the signature
//      LayerTestResult<int32_t, 3>(armnn::IWorkloadFactory&,
//                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
//                                  const armnn::ITensorHandleFactory&)
//  - ArgMinMaxFunction m_Function — specifies whether the function finds Min or
//    Max (Descriptors.hpp:65); defined as enum ArgMinMaxFunction in Types.hpp:72.
//  - int m_Axis — axis to reduce across the input tensor (Descriptors.hpp:67).
//  - IMemoryManagerSharedPtr is std::shared_ptr<IMemoryManager>.
//  - TensorInfo::SetQuantizationScale (Tensor.cpp:464) and
//    TensorInfo::SetQuantizationOffset (Tensor.cpp:480).
//  - CopyDataToITensorHandle(armnn::ITensorHandle*, const void*) and
//    CopyDataFromITensorHandle(void*, const armnn::ITensorHandle*) move data
//    between host memory and tensor handles.
//  - WorkloadInfo contains information about inputs and outputs to a layer.
// ---------------------------------------------------------------------------