ArmNN 20.05 — ArgMinMaxTestImpl.cpp (annotated source listing).
Go to the documentation of this file.
1 //
2 // Copyright © 2019 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ArgMinMaxTestImpl.hpp"

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
14 
15 namespace
16 {
17 
18 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
19 LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
20  armnn::IWorkloadFactory& workloadFactory,
22  armnn::ArgMinMaxFunction argMinMaxFunction,
23  const armnn::TensorInfo inputTensorInfo,
24  const armnn::TensorInfo outputTensorInfo,
25  const std::vector<float>& inputData,
26  const std::vector<int32_t>& outputData,
27  int axis = 3)
28 {
29  IgnoreUnused(memoryManager);
30  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
31 
32  LayerTestResult<int32_t, 3> result(outputTensorInfo);
33  result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
34 
35  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
36  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
37 
39  descriptor.m_Parameters.m_Function = argMinMaxFunction;
40  descriptor.m_Parameters.m_Axis = axis;
42 
43  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
44  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
45 
46  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
47 
48  inputHandle->Allocate();
49  outputHandle->Allocate();
50 
51  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
52 
53  workload->PostAllocationConfigure();
54  workload->Execute();
55 
56  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
57 
58  return result;
59 }
60 
61 } // namespace
62 
63 template<armnn::DataType ArmnnType, typename T>
65  armnn::IWorkloadFactory& workloadFactory,
67 {
68  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
69  const armnn::TensorShape outputShape{ 1, 1, 1 };
70 
71  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
72 
73  if (armnn::IsQuantizedType<T>())
74  {
75  inputTensorInfo.SetQuantizationScale(1.0f);
76  inputTensorInfo.SetQuantizationOffset(0);
77  }
78 
79  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
80 
81  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
82  std::vector<int32_t> outputValues({ 3 });
83 
84  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
86  inputTensorInfo, outputTensorInfo,
87  inputValues, outputValues, -1); // axis -1 === 3
88 }
89 
90 template<armnn::DataType ArmnnType, typename T>
92  armnn::IWorkloadFactory& workloadFactory,
94 {
95  const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
96  const armnn::TensorShape outputShape{ 1, 1, 1 };
97 
98  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
99 
100  if (armnn::IsQuantizedType<T>())
101  {
102  inputTensorInfo.SetQuantizationScale(1.0f);
103  inputTensorInfo.SetQuantizationOffset(0);
104  }
105 
106  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
107 
108  std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
109  std::vector<int32_t> outputValues({ 1 });
110 
111  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
113  inputTensorInfo, outputTensorInfo,
114  inputValues, outputValues, 3);
115 }
116 
117 template<armnn::DataType ArmnnType, typename T>
119  armnn::IWorkloadFactory& workloadFactory,
121 {
122  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
123  const armnn::TensorShape outputShape{ 1, 2, 4 };
124 
125  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
126 
127  if (armnn::IsQuantizedType<T>())
128  {
129  inputTensorInfo.SetQuantizationScale(1.0f);
130  inputTensorInfo.SetQuantizationOffset(0);
131  }
132 
133  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
134 
135  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
136  5.0f, 6.0f, 7.0f, 8.0f,
137 
138  10.0f, 20.0f, 30.0f, 40.0f,
139  50.0f, 60.0f, 70.0f, 80.0f,
140 
141  100.0f, 200.0f, 300.0f, 400.0f,
142  500.0f, 600.0f, 700.0f, 800.0f });
143  std::vector<int32_t> outputValues({ 0, 0, 0, 0,
144  0, 0, 0, 0 });
145 
146  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
148  inputTensorInfo, outputTensorInfo,
149  inputValues, outputValues, 1);
150 }
151 
152 template<armnn::DataType ArmnnType, typename T>
154  armnn::IWorkloadFactory& workloadFactory,
156 {
157  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
158  const armnn::TensorShape outputShape{ 1, 2, 4 };
159 
160  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
161 
162  if (armnn::IsQuantizedType<T>())
163  {
164  inputTensorInfo.SetQuantizationScale(1.0f);
165  inputTensorInfo.SetQuantizationOffset(0);
166  }
167 
168  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
169 
170  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
171  5.0f, 6.0f, 7.0f, 8.0f,
172 
173  10.0f, 20.0f, 30.0f, 40.0f,
174  50.0f, 60.0f, 70.0f, 80.0f,
175 
176  100.0f, 200.0f, 300.0f, 400.0f,
177  500.0f, 600.0f, 700.0f, 800.0f });
178  std::vector<int32_t> outputValues({ 2, 2, 2, 2,
179  2, 2, 2, 2 });
180 
181  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
183  inputTensorInfo, outputTensorInfo,
184  inputValues, outputValues, 1);
185 }
186 
187 template<armnn::DataType ArmnnType, typename T>
189  armnn::IWorkloadFactory& workloadFactory,
191 {
192  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
193  const armnn::TensorShape outputShape{ 1, 3, 4 };
194 
195  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
196  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
197 
198  if (armnn::IsQuantizedType<T>())
199  {
200  inputTensorInfo.SetQuantizationScale(1.0f);
201  inputTensorInfo.SetQuantizationOffset(0);
202  }
203 
204  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
205  5.0f, 6.0f, 7.0f, 8.0f,
206 
207  10.0f, 20.0f, 30.0f, 40.0f,
208  50.0f, 60.0f, 70.0f, 80.0f,
209 
210  100.0f, 200.0f, 300.0f, 400.0f,
211  500.0f, 600.0f, 700.0f, 800.0f });
212  std::vector<int32_t> outputValues({ 1, 1, 1, 1,
213  1, 1, 1, 1,
214  1, 1, 1, 1 });
215 
216  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
218  inputTensorInfo, outputTensorInfo,
219  inputValues, outputValues, 2);
220 }
221 
222 template<armnn::DataType ArmnnType, typename T>
224  armnn::IWorkloadFactory& workloadFactory,
226 {
227  const armnn::TensorShape inputShape{ 1, 3, 2, 4};
228  const armnn::TensorShape outputShape{ 1, 3, 2 };
229 
230  armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
231  armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
232 
233  if (armnn::IsQuantizedType<T>())
234  {
235  inputTensorInfo.SetQuantizationScale(1.0f);
236  inputTensorInfo.SetQuantizationOffset(0);
237  }
238 
239  std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
240  5.0f, 6.0f, 7.0f, 8.0f,
241 
242  10.0f, 20.0f, 30.0f, 40.0f,
243  50.0f, 60.0f, 70.0f, 80.0f,
244 
245  100.0f, 200.0f, 300.0f, 400.0f,
246  500.0f, 600.0f, 700.0f, 800.0f });
247  std::vector<int32_t> outputValues({ 0, 0,
248  0, 0,
249  0, 0 });
250 
251  return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
253  inputTensorInfo, outputTensorInfo,
254  inputValues, outputValues, 3);
255 }
256 
257 
258 // Explicit template specializations
259 
261 ArgMaxSimpleTest<armnn::DataType::Float32>(
262  armnn::IWorkloadFactory& workloadFactory,
264 
266 ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
267  armnn::IWorkloadFactory& workloadFactory,
269 
271 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
272  armnn::IWorkloadFactory& workloadFactory,
274 
276 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
277  armnn::IWorkloadFactory& workloadFactory,
279 
281 ArgMaxSimpleTest<armnn::DataType::Signed32>(
282  armnn::IWorkloadFactory& workloadFactory,
284 
286 ArgMinSimpleTest<armnn::DataType::Float32>(
287  armnn::IWorkloadFactory& workloadFactory,
289 
291 ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
292  armnn::IWorkloadFactory& workloadFactory,
294 
296 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
297  armnn::IWorkloadFactory& workloadFactory,
299 
301 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
302  armnn::IWorkloadFactory& workloadFactory,
304 
306 ArgMinSimpleTest<armnn::DataType::Signed32>(
307  armnn::IWorkloadFactory& workloadFactory,
309 
311 ArgMinChannelTest<armnn::DataType::Float32>(
312  armnn::IWorkloadFactory& workloadFactory,
314 
316 ArgMinChannelTest<armnn::DataType::QAsymmS8>(
317  armnn::IWorkloadFactory& workloadFactory,
319 
321 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
322  armnn::IWorkloadFactory& workloadFactory,
324 
326 ArgMinChannelTest<armnn::DataType::QSymmS16>(
327  armnn::IWorkloadFactory& workloadFactory,
329 
331 ArgMinChannelTest<armnn::DataType::Signed32>(
332  armnn::IWorkloadFactory& workloadFactory,
334 
336 ArgMaxChannelTest<armnn::DataType::Float32>(
337  armnn::IWorkloadFactory& workloadFactory,
339 
341 ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
342  armnn::IWorkloadFactory& workloadFactory,
344 
346 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
347  armnn::IWorkloadFactory& workloadFactory,
349 
351 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
352  armnn::IWorkloadFactory& workloadFactory,
354 
356 ArgMaxChannelTest<armnn::DataType::Signed32>(
357  armnn::IWorkloadFactory& workloadFactory,
359 
361 ArgMaxHeightTest<armnn::DataType::Float32>(
362  armnn::IWorkloadFactory& workloadFactory,
364 
366 ArgMaxHeightTest<armnn::DataType::Signed32>(
367  armnn::IWorkloadFactory& workloadFactory,
369 
371 ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
372  armnn::IWorkloadFactory& workloadFactory,
374 
376 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
377  armnn::IWorkloadFactory& workloadFactory,
379 
381 ArgMinWidthTest<armnn::DataType::Float32>(
382  armnn::IWorkloadFactory& workloadFactory,
384 
386 ArgMinWidthTest<armnn::DataType::Signed32>(
387  armnn::IWorkloadFactory& workloadFactory,
389 
391 ArgMinWidthTest<armnn::DataType::QAsymmS8>(
392  armnn::IWorkloadFactory& workloadFactory,
394 
396 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
397  armnn::IWorkloadFactory& workloadFactory,
LayerTestResult< int32_t, 3 > ArgMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:64
LayerTestResult< int32_t, 3 > ArgMinChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
LayerTestResult< int32_t, 3 > ArgMaxHeightTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int32_t, 3 > ArgMinWidthTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ArgMinMaxFunction
Definition: Types.hpp:71
Contains information about inputs and outputs to a layer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:66
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:276
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)