ArmNN 20.08 — source listing of ArgMinMaxTestImpl.cpp.
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ArgMinMaxTestImpl.hpp"
7 
8 
12 
13 #include <test/TensorHelpers.hpp>
14 
15 namespace
16 {
17 
18 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
19 LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
20  armnn::IWorkloadFactory& workloadFactory,
22  armnn::ArgMinMaxFunction argMinMaxFunction,
23  const armnn::TensorInfo inputTensorInfo,
24  const armnn::TensorInfo outputTensorInfo,
25  const std::vector<float>& inputData,
26  const std::vector<int32_t>& outputData,
27  int axis = 3)
28 {
29  IgnoreUnused(memoryManager);
30  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
31 
32  LayerTestResult<int32_t, 3> result(outputTensorInfo);
33  result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
34 
36  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
37  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
39 
41  descriptor.m_Parameters.m_Function = argMinMaxFunction;
42  descriptor.m_Parameters.m_Axis = axis;
44 
45  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
46  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
47 
48  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
49 
50  inputHandle->Allocate();
51  outputHandle->Allocate();
52 
53  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
54 
55  workload->PostAllocationConfigure();
56  workload->Execute();
57 
58  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
59 
60  return result;
61 }
62 
63 } // namespace
64 
65 template<armnn::DataType ArmnnType, typename T>
67  armnn::IWorkloadFactory& workloadFactory,
69 {
70  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
71  const armnn::TensorShape outputShape{ 1, 1, 1 };
72 
73  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
74 
75  if (armnn::IsQuantizedType<T>())
76  {
77  inputTensorInfo.SetQuantizationScale(1.0f);
78  inputTensorInfo.SetQuantizationOffset(0);
79  }
80 
81  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
82 
83  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
84  std::vector<int32_t> outputValues({ 3 });
85 
86  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
88  inputTensorInfo, outputTensorInfo,
89  inputValues, outputValues, -1); // axis -1 === 3
90 }
91 
92 template<armnn::DataType ArmnnType, typename T>
94  armnn::IWorkloadFactory& workloadFactory,
96 {
97  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
98  const armnn::TensorShape outputShape{ 1, 1, 1 };
99 
100  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
101 
102  if (armnn::IsQuantizedType<T>())
103  {
104  inputTensorInfo.SetQuantizationScale(1.0f);
105  inputTensorInfo.SetQuantizationOffset(0);
106  }
107 
108  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
109 
110  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
111  std::vector<int32_t> outputValues({ 1 });
112 
113  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
115  inputTensorInfo, outputTensorInfo,
116  inputValues, outputValues, 3);
117 }
118 
119 template<armnn::DataType ArmnnType, typename T>
121  armnn::IWorkloadFactory& workloadFactory,
123 {
124  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
125  const armnn::TensorShape outputShape{ 1, 2, 4 };
126 
127  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
128 
129  if (armnn::IsQuantizedType<T>())
130  {
131  inputTensorInfo.SetQuantizationScale(1.0f);
132  inputTensorInfo.SetQuantizationOffset(0);
133  }
134 
135  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
136 
137  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
138  5.0f, 6.0f, 7.0f, 8.0f,
139 
140  10.0f, 20.0f, 30.0f, 40.0f,
141  50.0f, 60.0f, 70.0f, 80.0f,
142 
143  100.0f, 200.0f, 300.0f, 400.0f,
144  500.0f, 600.0f, 700.0f, 800.0f });
145  std::vector<int32_t> outputValues({ 0, 0, 0, 0,
146  0, 0, 0, 0 });
147 
148  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
150  inputTensorInfo, outputTensorInfo,
151  inputValues, outputValues, 1);
152 }
153 
154 template<armnn::DataType ArmnnType, typename T>
156  armnn::IWorkloadFactory& workloadFactory,
158 {
159  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
160  const armnn::TensorShape outputShape{ 1, 2, 4 };
161 
162  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
163 
164  if (armnn::IsQuantizedType<T>())
165  {
166  inputTensorInfo.SetQuantizationScale(1.0f);
167  inputTensorInfo.SetQuantizationOffset(0);
168  }
169 
170  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
171 
172  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
173  5.0f, 6.0f, 7.0f, 8.0f,
174 
175  10.0f, 20.0f, 30.0f, 40.0f,
176  50.0f, 60.0f, 70.0f, 80.0f,
177 
178  100.0f, 200.0f, 300.0f, 400.0f,
179  500.0f, 600.0f, 700.0f, 800.0f });
180  std::vector<int32_t> outputValues({ 2, 2, 2, 2,
181  2, 2, 2, 2 });
182 
183  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
185  inputTensorInfo, outputTensorInfo,
186  inputValues, outputValues, 1);
187 }
188 
189 template<armnn::DataType ArmnnType, typename T>
191  armnn::IWorkloadFactory& workloadFactory,
193 {
194  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
195  const armnn::TensorShape outputShape{ 1, 3, 4 };
196 
197  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
198  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
199 
200  if (armnn::IsQuantizedType<T>())
201  {
202  inputTensorInfo.SetQuantizationScale(1.0f);
203  inputTensorInfo.SetQuantizationOffset(0);
204  }
205 
206  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
207  5.0f, 6.0f, 7.0f, 8.0f,
208 
209  10.0f, 20.0f, 30.0f, 40.0f,
210  50.0f, 60.0f, 70.0f, 80.0f,
211 
212  100.0f, 200.0f, 300.0f, 400.0f,
213  500.0f, 600.0f, 700.0f, 800.0f });
214  std::vector<int32_t> outputValues({ 1, 1, 1, 1,
215  1, 1, 1, 1,
216  1, 1, 1, 1 });
217 
218  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
220  inputTensorInfo, outputTensorInfo,
221  inputValues, outputValues, 2);
222 }
223 
224 template<armnn::DataType ArmnnType, typename T>
226  armnn::IWorkloadFactory& workloadFactory,
228 {
229  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
230  const armnn::TensorShape outputShape{ 1, 3, 2 };
231 
232  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
233  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
234 
235  if (armnn::IsQuantizedType<T>())
236  {
237  inputTensorInfo.SetQuantizationScale(1.0f);
238  inputTensorInfo.SetQuantizationOffset(0);
239  }
240 
241  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
242  5.0f, 6.0f, 7.0f, 8.0f,
243 
244  10.0f, 20.0f, 30.0f, 40.0f,
245  50.0f, 60.0f, 70.0f, 80.0f,
246 
247  100.0f, 200.0f, 300.0f, 400.0f,
248  500.0f, 600.0f, 700.0f, 800.0f });
249  std::vector<int32_t> outputValues({ 0, 0,
250  0, 0,
251  0, 0 });
252 
253  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
255  inputTensorInfo, outputTensorInfo,
256  inputValues, outputValues, 3);
257 }
258 
259 
260 // Explicit template specializations
261 
263 ArgMaxSimpleTest<armnn::DataType::Float32>(
264  armnn::IWorkloadFactory& workloadFactory,
266 
268 ArgMaxSimpleTest<armnn::DataType::Float16>(
269  armnn::IWorkloadFactory& workloadFactory,
271 
273 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
274  armnn::IWorkloadFactory& workloadFactory,
276 
278 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
279  armnn::IWorkloadFactory& workloadFactory,
281 
283 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
284  armnn::IWorkloadFactory& workloadFactory,
286 
288 ArgMaxSimpleTest<armnn::DataType::Signed32>(
289  armnn::IWorkloadFactory& workloadFactory,
291 
293 ArgMinSimpleTest<armnn::DataType::Float32>(
294  armnn::IWorkloadFactory& workloadFactory,
296 
298 ArgMinSimpleTest<armnn::DataType::Float16>(
299  armnn::IWorkloadFactory& workloadFactory,
301 
303 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
304  armnn::IWorkloadFactory& workloadFactory,
306 
308 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
309  armnn::IWorkloadFactory& workloadFactory,
311 
313 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
314  armnn::IWorkloadFactory& workloadFactory,
316 
318 ArgMinSimpleTest<armnn::DataType::Signed32>(
319  armnn::IWorkloadFactory& workloadFactory,
321 
323 ArgMinChannelTest<armnn::DataType::Float32>(
324  armnn::IWorkloadFactory& workloadFactory,
326 
328 ArgMinChannelTest<armnn::DataType::Float16>(
329  armnn::IWorkloadFactory& workloadFactory,
331 
333 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
334  armnn::IWorkloadFactory& workloadFactory,
336 
338 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
339  armnn::IWorkloadFactory& workloadFactory,
341 
343 ArgMinChannelTest<armnn::DataType::QSymmS16>(
344  armnn::IWorkloadFactory& workloadFactory,
346 
348 ArgMinChannelTest<armnn::DataType::Signed32>(
349  armnn::IWorkloadFactory& workloadFactory,
351 
353 ArgMaxChannelTest<armnn::DataType::Float32>(
354  armnn::IWorkloadFactory& workloadFactory,
356 
358 ArgMaxChannelTest<armnn::DataType::Float16>(
359  armnn::IWorkloadFactory& workloadFactory,
361 
363 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
364  armnn::IWorkloadFactory& workloadFactory,
366 
368 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
369  armnn::IWorkloadFactory& workloadFactory,
371 
373 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
374  armnn::IWorkloadFactory& workloadFactory,
376 
378 ArgMaxChannelTest<armnn::DataType::Signed32>(
379  armnn::IWorkloadFactory& workloadFactory,
381 
383 ArgMaxHeightTest<armnn::DataType::Float32>(
384  armnn::IWorkloadFactory& workloadFactory,
386 
388 ArgMaxHeightTest<armnn::DataType::Float16>(
389  armnn::IWorkloadFactory& workloadFactory,
391 
393 ArgMaxHeightTest<armnn::DataType::Signed32>(
394  armnn::IWorkloadFactory& workloadFactory,
396 
398 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
399  armnn::IWorkloadFactory& workloadFactory,
401 
403 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
404  armnn::IWorkloadFactory& workloadFactory,
406 
408 ArgMinWidthTest<armnn::DataType::Float32>(
409  armnn::IWorkloadFactory& workloadFactory,
411 
413 ArgMinWidthTest<armnn::DataType::Float16>(
414  armnn::IWorkloadFactory& workloadFactory,
416 
418 ArgMinWidthTest<armnn::DataType::Signed32>(
419  armnn::IWorkloadFactory& workloadFactory,
421 
423 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
424  armnn::IWorkloadFactory& workloadFactory,
426 
428 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
429  armnn::IWorkloadFactory& workloadFactory,
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< int32_t, 3 > ArgMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:64
LayerTestResult< int32_t, 3 > ArgMinChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
LayerTestResult< int32_t, 3 > ArgMaxHeightTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int32_t, 3 > ArgMinWidthTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ArgMinMaxFunction
Definition: Types.hpp:71
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)