//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SoftmaxTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
21 namespace
22 {
23 
24 struct Simple3dSoftmaxOutputData
25 {
26  const std::vector<float> outputData =
27  {
28  0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
29  0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
30  };
31 
32  const armnn::TensorShape inputShape{ 1, 8, 1 };
33 
34  const std::vector<float> inputData =
35  {
36  0.0f, 1.0f, 0.0f, 0.0f,
37  0.5f, 0.0f, 0.0f, 0.0f,
38  };
39 };
40 
41 struct Simple4dSoftmaxData
42 {
43  const armnn::TensorShape inputShape{ 1, 8, 1, 1 };
44 
45  const std::vector<float> outputData =
46  {
47  0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
48  0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
49  };
50 
51  const std::vector<float> inputData =
52  {
53  0.0f, 1.0f, 0.0f, 0.0f,
54  0.5f, 0.0f, 0.0f, 0.0f
55  };
56 };
57 
58 template<armnn::DataType ArmnnType, std::size_t n, typename T = armnn::ResolveType<ArmnnType>>
59 LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
60  armnn::IWorkloadFactory& workloadFactory,
62  float beta,
63  const armnn::TensorShape& inputShape,
64  const std::vector<float>& outputData,
65  const std::vector<float>& inputData,
66  int axis = -1)
67 {
68  IgnoreUnused(memoryManager);
69  using std::exp;
70 
71  const float qScale = 1.f / 256.f;
72  const int qOffset = 0;
73 
74  armnn::TensorInfo inputTensorInfo;
75  armnn::TensorInfo outputTensorInfo;
76 
77  inputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
78  inputTensorInfo.SetQuantizationScale(qScale);
79  inputTensorInfo.SetQuantizationOffset(qOffset);
80 
81  outputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
82  outputTensorInfo.SetQuantizationScale(qScale);
83  outputTensorInfo.SetQuantizationOffset(qOffset);
84 
85  LayerTestResult<T, n> ret(outputTensorInfo);
86 
87  // Each row is independently softmax'd.
88  auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
89 
91  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
92  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
94 
96  data.m_Parameters.m_Beta = beta;
97  data.m_Parameters.m_Axis = axis;
98 
100  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
101  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
102 
103  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
104 
105  inputHandle->Allocate();
106  outputHandle->Allocate();
107  CopyDataToITensorHandle(inputHandle.get(), input.origin());
108 
109  ARMNN_ASSERT(workload);
110 
111  ExecuteWorkload(*workload, memoryManager);
112 
113  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
114 
115  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
116  ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
117 
118  return ret;
119 }
120 
121 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
122 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
123  armnn::IWorkloadFactory& workloadFactory,
125  float beta)
126 {
127  using std::exp;
128  const armnn::TensorShape inputShape{ 2, 4 };
129 
130  float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
131  exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
132  float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
133  float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
134  exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
135  float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
136 
137  const std::vector<float> outputData = { x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
138  x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 };
139 
140  const std::vector<float> inputData =
141  {
142  0.f, 1.f, 0.f, 0.f,
143  .5f, 0.f, 0.f, 0.f,
144  };
145 
146  return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
147  inputShape, outputData, inputData);
148 }
149 
150 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
151 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
152  armnn::IWorkloadFactory& workloadFactory,
154  float beta,
155  int axis)
156 {
157  armnn::TensorShape inputShape;
158  std::vector<float> inputData;
159  std::vector<float> outputData;
160  switch (axis)
161  {
162  case -2:
163  case 0:
164  {
165  inputShape = {5, 2};
166 
167  inputData =
168  {
169  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
170  };
171 
172  outputData =
173  {
174  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
175  0.087144312427294f,
176  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
177  7.246299848982885e-08f
178  };
179  break;
180  }
181  case -1:
182  case 1:
183  {
184  inputShape = {2, 5};
185 
186  inputData =
187  {
188  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
189  };
190 
191  outputData =
192  {
193  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
194  7.246299848982885e-08f,
195  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
196  7.246299848982885e-08f
197  };
198  break;
199  }
200  }
201  return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
202  inputShape, outputData, inputData, axis);
203 }
204 
205 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
206 LayerTestResult<T, 3> Simple3dSoftmaxTestImpl(
207  armnn::IWorkloadFactory& workloadFactory,
209  float beta,
210  const armnn::TensorShape& inputShape,
211  const std::vector<float>& outputData,
212  const std::vector<float>& inputData,
213  int axis = 1)
214 {
215  return SimpleSoftmaxBaseTestImpl<ArmnnType, 3>(workloadFactory, memoryManager, beta,
216  inputShape, outputData, inputData, axis);
217 }
218 
219 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
220 LayerTestResult<T, 4> Simple4dSoftmaxTestImpl(
221  armnn::IWorkloadFactory& workloadFactory,
223  float beta,
224  const armnn::TensorShape& inputShape,
225  const std::vector<float>& outputData,
226  const std::vector<float>& inputData,
227  int axis = 1)
228 {
229 
230  return SimpleSoftmaxBaseTestImpl<ArmnnType, 4>(workloadFactory, memoryManager, beta,
231  inputShape, outputData, inputData, axis);
232 }
233 
234 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
235 LayerTestResult<T, 2> CompareSoftmaxTestImpl(
236  armnn::IWorkloadFactory& workloadFactory,
238  armnn::IWorkloadFactory& refWorkloadFactory,
239  float beta)
240 {
241 
242  const int batchSize = 20;
243  const int channels = 30;
244 
245  armnn::TensorInfo inputTensorInfo;
246  armnn::TensorInfo outputTensorInfo;
247 
248  unsigned int inputShape[] = { batchSize, channels };
249 
250  inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
251  outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
252  float qScale = 1.f / 256.f;
253  int qOffset = 0;
254  inputTensorInfo.SetQuantizationScale(qScale);
255  inputTensorInfo.SetQuantizationOffset(qOffset);
256  outputTensorInfo.SetQuantizationScale(qScale);
257  outputTensorInfo.SetQuantizationOffset(qOffset);
258 
259 
260  LayerTestResult<T, 2> ret(outputTensorInfo);
261  auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
262 
264  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
265  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
267 
269  data.m_Parameters.m_Beta = beta;
270 
271  armnn::WorkloadInfo info;
272  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
273  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
274 
276  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
277  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
279 
280  armnn::SoftmaxQueueDescriptor refData = data;
281  armnn::WorkloadInfo refInfo = info;
282  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
283  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
284 
285  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
286  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
287 
288  outputHandleRef->Allocate();
289  inputHandleRef->Allocate();
290 
291  inputHandle->Allocate();
292  outputHandle->Allocate();
293 
294  CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
295  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
296 
297  ExecuteWorkload(*workload, memoryManager);
298 
299  workloadRef->Execute();
300 
301  CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
302  CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
303 
304  return ret;
305 }
306 
307 } // anonymous namespace
308 
310  armnn::IWorkloadFactory& workloadFactory,
312  float beta)
313 {
314  return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
315 }
316 
318  armnn::IWorkloadFactory& workloadFactory,
320  float beta,
321  int axis)
322 {
323  return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
324 }
325 
327  armnn::IWorkloadFactory& workloadFactory,
329  float beta)
330 {
331  Simple3dSoftmaxOutputData data;
332  return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
333  data.inputShape, data.outputData, data.inputData);
334 }
335 
337  armnn::IWorkloadFactory& workloadFactory,
339  float beta,
340  int axis)
341 {
342  armnn::TensorShape inputShape;
343  std::vector<float> inputData;
344  std::vector<float> outputData;
345  switch (axis)
346  {
347  case -3:
348  case 0:
349  {
350  inputShape = {5, 2, 2};
351 
352  inputData =
353  {
354  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
355 
356  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
357  };
358 
359  outputData =
360  {
361  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
362  0.236882800924671f,
363  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
364  0.087144312427294f,
365 
366  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
367  0.032058600957022f,
368  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
369  7.246299848982885e-08f
370  };
371  break;
372  }
373  case -2:
374  case 1:
375  {
376  inputShape = {2, 5, 2};
377 
378  inputData =
379  {
380  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
381 
382  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
383  };
384 
385  outputData =
386  {
387  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
388  0.087144312427294f,
389  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
390  7.246299848982885e-08f,
391 
392  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
393  0.087144312427294f,
394  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
395  7.246299848982885e-08f
396  };
397  break;
398  }
399  case -1:
400  case 2:
401  {
402  inputShape = {2, 2, 5};
403 
404  inputData =
405  {
406  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
407  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
408  };
409 
410  outputData =
411  {
412  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
413  7.246299848982885e-08f,
414  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
415  7.246299848982885e-08f,
416 
417  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
418  7.246299848982885e-08f,
419  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
420  7.246299848982885e-08f
421  };
422  break;
423  }
424  }
425 
426  return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
427  inputShape, outputData, inputData, axis);
428 }
429 
431  armnn::IWorkloadFactory& workloadFactory,
433  float beta)
434 {
435  Simple4dSoftmaxData data;
436  return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
437  data.outputData, data.inputData);
438 }
439 
441  armnn::IWorkloadFactory& workloadFactory,
443  float beta,
444  int axis)
445 {
446  armnn::TensorShape inputShape;
447  std::vector<float> inputData;
448  std::vector<float> outputData;
449  switch (axis)
450  {
451  case -4:
452  case 0:
453  {
454  inputShape = {5, 2, 2, 2};
455 
456  inputData =
457  {
458  17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
459  16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
460  15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
461  14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
462  };
463 
464  outputData =
465  {
466  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
467  0.643914213228014f,
468  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
469  0.236882800924671f,
470  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
471  0.236882800924671f,
472  0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
473  0.087144312427294f,
474 
475  0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
476  0.032058600957022f,
477  0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
478  0.032058600957022f,
479  0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
480  7.246299848982885e-08f,
481  7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
482  7.246299848982885e-08f, 7.246299848982885e-08f
483  };
484  break;
485  }
486  case -3:
487  case 1:
488  {
489  inputShape = {2, 5, 2, 2};
490 
491  inputData =
492  {
493  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
494  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
495  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
496  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
497  };
498 
499  outputData =
500  {
501  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
502  0.236882800924671f,
503  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
504  0.087144312427294f,
505  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
506  0.032058600957022f,
507  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
508  7.246299848982885e-08f,
509 
510 
511  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
512  0.236882800924671f,
513  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
514  0.087144312427294f,
515  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
516  0.032058600957022f,
517  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
518  7.246299848982885e-08f
519  };
520  break;
521  }
522  case -2:
523  case 2:
524  {
525  inputShape = {2, 2, 5, 2};
526 
527  inputData =
528  {
529  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
530  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
531  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
532  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
533  };
534 
535  outputData =
536  {
537  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
538  0.087144312427294f,
539  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
540  7.246299848982885e-08f,
541  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
542  0.087144312427294f,
543  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
544  7.246299848982885e-08f,
545 
546  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
547  0.087144312427294f,
548  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
549  7.246299848982885e-08f,
550  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
551  0.087144312427294f,
552  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
553  7.246299848982885e-08f
554  };
555  break;
556  }
557  case -1:
558  case 3:
559  {
560  inputShape = {2, 2, 2, 5};
561 
562  inputData =
563  {
564  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
565  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
566  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
567  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
568  };
569 
570  outputData =
571  {
572  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
573  7.246299848982885e-08f,
574  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
575  7.246299848982885e-08f,
576  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
577  7.246299848982885e-08f,
578  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
579  7.246299848982885e-08f,
580 
581  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
582  7.246299848982885e-08f,
583  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
584  7.246299848982885e-08f,
585  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
586  7.246299848982885e-08f,
587  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
588  7.246299848982885e-08f
589  };
590  break;
591  }
592  }
593 
594  return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(
595  workloadFactory,
596  memoryManager,
597  beta,
598  inputShape,
599  outputData,
600  inputData,
601  axis);
602 }
603 
605  armnn::IWorkloadFactory& workloadFactory,
607  float beta)
608 {
609  return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
610 }
611 
613  armnn::IWorkloadFactory& workloadFactory,
615  float beta)
616 {
617  Simple3dSoftmaxOutputData data;
618  return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
619  workloadFactory,
620  memoryManager,
621  beta,
622  data.inputShape,
623  data.outputData,
624  data.inputData);
625 }
626 
628  armnn::IWorkloadFactory& workloadFactory,
630  float beta)
631 {
632  Simple4dSoftmaxData data;
633 
634  return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
635  data.inputShape, data.outputData, data.inputData);
636 }
637 
639  armnn::IWorkloadFactory& workloadFactory,
641  float beta)
642 {
643  return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta);
644 }
645 
647  armnn::IWorkloadFactory& workloadFactory,
649  float beta)
650 {
651  Simple3dSoftmaxOutputData data;
652  return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
653  data.inputShape, data.outputData, data.inputData);
654 }
655 
657  armnn::IWorkloadFactory& workloadFactory,
659  float beta)
660 {
661  Simple4dSoftmaxData data;
662  return Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
663  data.inputShape, data.outputData, data.inputData);
664 }
665 
667  armnn::IWorkloadFactory& workloadFactory,
669  float beta)
670 {
671  return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
672 }
673 
675  armnn::IWorkloadFactory& workloadFactory,
677  float beta)
678 {
679  Simple3dSoftmaxOutputData data;
680  return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
681  data.inputShape, data.outputData, data.inputData);
682 }
683 
685  armnn::IWorkloadFactory& workloadFactory,
687  float beta)
688 {
689  Simple4dSoftmaxData data;
690 
691  return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
692  data.inputShape, data.outputData, data.inputData);
693 }
694 
696  armnn::IWorkloadFactory& workloadFactory,
698  armnn::IWorkloadFactory& refWorkloadFactory,
699  float beta)
700 {
701  return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
702  workloadFactory, memoryManager, refWorkloadFactory, beta);
703 }
704 
706  armnn::IWorkloadFactory& workloadFactory,
708  armnn::IWorkloadFactory& refWorkloadFactory,
709  float beta)
710 {
711  return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
712  workloadFactory, memoryManager, refWorkloadFactory, beta);
713 }
LayerTestResult< int16_t, 2 > SimpleSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< uint8_t, 3 > Simple3dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
float m_Beta
Exponentiation value.
LayerTestResult< int16_t, 3 > Simple3dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< armnn::Half, 3 > Simple3dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void IgnoreUnused(Ts &&...)
LayerTestResult< int16_t, 4 > Simple4dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 2 > SimpleSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 4 > Simple4dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< float, 3 > Simple3dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
LayerTestResult< armnn::Half, 4 > Simple4dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< float, 2 > CompareSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 3 > Simple3dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
LayerTestResult< float, 2 > SimpleAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 2 > SimpleSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481
LayerTestResult< float, 4 > Simple4dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
LayerTestResult< float, 4 > Simple4dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 2 > CompareSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
LayerTestResult< armnn::Half, 2 > SimpleSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)