//
// ArmNN 20.05 — SoftmaxTestImpl.cpp (reference listing)
//
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "SoftmaxTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
21 namespace
22 {
23 
// Shared fixture for the simple 3D softmax tests: an input of shape {1, 8, 1}
// and the expected softmax of that input taken over the 8-element axis with
// beta == 1 (e.g. exp(1)/sum == 0.26220518, exp(0.5)/sum == 0.15903549).
struct Simple3dSoftmaxOutputData
{
 // Expected output: softmax(inputData) with beta = 1.
 const std::vector<float> outputData =
 {
 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
 };

 const armnn::TensorShape inputShape{ 1, 8, 1 };

 const std::vector<float> inputData =
 {
 0.0f, 1.0f, 0.0f, 0.0f,
 0.5f, 0.0f, 0.0f, 0.0f,
 };
};
40 
// Shared fixture for the simple 4D softmax tests: same data as the 3D fixture
// but with shape {1, 8, 1, 1}; expected output is softmax over the 8-element
// axis with beta == 1.
struct Simple4dSoftmaxData
{
 const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

 // Expected output: softmax(inputData) with beta = 1.
 const std::vector<float> outputData =
 {
 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
 };

 const std::vector<float> inputData =
 {
 0.0f, 1.0f, 0.0f, 0.0f,
 0.5f, 0.0f, 0.0f, 0.0f
 };
};
57 
58 template<armnn::DataType ArmnnType, std::size_t n, typename T = armnn::ResolveType<ArmnnType>>
59 LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
60  armnn::IWorkloadFactory& workloadFactory,
62  float beta,
63  const armnn::TensorShape& inputShape,
64  const std::vector<float>& outputData,
65  const std::vector<float>& inputData,
66  int axis = 1)
67 {
68  IgnoreUnused(memoryManager);
69  using std::exp;
70 
71  const float qScale = 1.f / 256.f;
72  const int qOffset = 0;
73 
74  armnn::TensorInfo inputTensorInfo;
75  armnn::TensorInfo outputTensorInfo;
76 
77  inputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
78  inputTensorInfo.SetQuantizationScale(qScale);
79  inputTensorInfo.SetQuantizationOffset(qOffset);
80 
81  outputTensorInfo = armnn::TensorInfo(inputShape, ArmnnType);
82  outputTensorInfo.SetQuantizationScale(qScale);
83  outputTensorInfo.SetQuantizationOffset(qOffset);
84 
85  LayerTestResult<T, n> ret(outputTensorInfo);
86 
87  // Each row is independently softmax'd.
88  auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
89 
90  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
91  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
92 
94  data.m_Parameters.m_Beta = beta;
95  data.m_Parameters.m_Axis = axis;
96 
98  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
99  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
100 
101  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
102 
103  inputHandle->Allocate();
104  outputHandle->Allocate();
105  CopyDataToITensorHandle(inputHandle.get(), input.origin());
106 
107  ARMNN_ASSERT(workload);
108 
109  ExecuteWorkload(*workload, memoryManager);
110 
111  CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
112 
113  std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
114  ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
115 
116  return ret;
117 }
118 
119 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
120 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
121  armnn::IWorkloadFactory& workloadFactory,
123  float beta)
124 {
125  using std::exp;
126  const armnn::TensorShape inputShape{ 2, 4 };
127 
128  float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
129  exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
130  float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
131  float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
132  exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
133  float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
134 
135  const std::vector<float> outputData = { x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
136  x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 };
137 
138  const std::vector<float> inputData =
139  {
140  0.f, 1.f, 0.f, 0.f,
141  .5f, 0.f, 0.f, 0.f,
142  };
143 
144  return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
145  inputShape, outputData, inputData);
146 }
147 
148 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
149 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
150  armnn::IWorkloadFactory& workloadFactory,
152  float beta,
153  int axis)
154 {
155  armnn::TensorShape inputShape;
156  std::vector<float> inputData;
157  std::vector<float> outputData;
158  switch (axis)
159  {
160  case -2:
161  case 0:
162  {
163  inputShape = {5, 2};
164 
165  inputData =
166  {
167  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
168  };
169 
170  outputData =
171  {
172  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
173  0.087144312427294f,
174  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
175  7.246299848982885e-08f
176  };
177  break;
178  }
179  case -1:
180  case 1:
181  {
182  inputShape = {2, 5};
183 
184  inputData =
185  {
186  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
187  };
188 
189  outputData =
190  {
191  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
192  7.246299848982885e-08f,
193  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
194  7.246299848982885e-08f
195  };
196  break;
197  }
198  }
199  return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
200  inputShape, outputData, inputData, axis);
201 }
202 
203 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
204 LayerTestResult<T, 3> Simple3dSoftmaxTestImpl(
205  armnn::IWorkloadFactory& workloadFactory,
207  float beta,
208  const armnn::TensorShape& inputShape,
209  const std::vector<float>& outputData,
210  const std::vector<float>& inputData,
211  int axis = 1)
212 {
213  return SimpleSoftmaxBaseTestImpl<ArmnnType, 3>(workloadFactory, memoryManager, beta,
214  inputShape, outputData, inputData, axis);
215 }
216 
217 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
218 LayerTestResult<T, 4> Simple4dSoftmaxTestImpl(
219  armnn::IWorkloadFactory& workloadFactory,
221  float beta,
222  const armnn::TensorShape& inputShape,
223  const std::vector<float>& outputData,
224  const std::vector<float>& inputData,
225  int axis = 1)
226 {
227 
228  return SimpleSoftmaxBaseTestImpl<ArmnnType, 4>(workloadFactory, memoryManager, beta,
229  inputShape, outputData, inputData, axis);
230 }
231 
232 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
233 LayerTestResult<T, 2> CompareSoftmaxTestImpl(
234  armnn::IWorkloadFactory& workloadFactory,
236  armnn::IWorkloadFactory& refWorkloadFactory,
237  float beta)
238 {
239 
240  const int batchSize = 20;
241  const int channels = 30;
242 
243  armnn::TensorInfo inputTensorInfo;
244  armnn::TensorInfo outputTensorInfo;
245 
246  unsigned int inputShape[] = { batchSize, channels };
247 
248  inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
249  outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
250  float qScale = 1.f / 256.f;
251  int qOffset = 0;
252  inputTensorInfo.SetQuantizationScale(qScale);
253  inputTensorInfo.SetQuantizationOffset(qOffset);
254  outputTensorInfo.SetQuantizationScale(qScale);
255  outputTensorInfo.SetQuantizationOffset(qOffset);
256 
257 
258  LayerTestResult<T, 2> ret(outputTensorInfo);
259  auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
260 
261  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
262  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
263 
265  data.m_Parameters.m_Beta = beta;
266 
267  armnn::WorkloadInfo info;
268  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
269  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
270 
271  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
272  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
273 
274 
275  armnn::SoftmaxQueueDescriptor refData = data;
276  armnn::WorkloadInfo refInfo = info;
277  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
278  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
279 
280  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
281  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
282 
283  outputHandleRef->Allocate();
284  inputHandleRef->Allocate();
285 
286  inputHandle->Allocate();
287  outputHandle->Allocate();
288 
289  CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
290  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
291 
292  ExecuteWorkload(*workload, memoryManager);
293 
294  workloadRef->Execute();
295 
296  CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
297  CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
298 
299  return ret;
300 }
301 
302 } // anonymous namespace
303 
305  armnn::IWorkloadFactory& workloadFactory,
307  float beta)
308 {
309  return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
310 }
311 
313  armnn::IWorkloadFactory& workloadFactory,
315  float beta,
316  int axis)
317 {
318  return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
319 }
320 
322  armnn::IWorkloadFactory& workloadFactory,
324  float beta)
325 {
326  Simple3dSoftmaxOutputData data;
327  return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
328  data.inputShape, data.outputData, data.inputData);
329 }
330 
332  armnn::IWorkloadFactory& workloadFactory,
334  float beta,
335  int axis)
336 {
337  armnn::TensorShape inputShape;
338  std::vector<float> inputData;
339  std::vector<float> outputData;
340  switch (axis)
341  {
342  case -3:
343  case 0:
344  {
345  inputShape = {5, 2, 2};
346 
347  inputData =
348  {
349  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
350 
351  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
352  };
353 
354  outputData =
355  {
356  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
357  0.236882800924671f,
358  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
359  0.087144312427294f,
360 
361  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
362  0.032058600957022f,
363  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
364  7.246299848982885e-08f
365  };
366  break;
367  }
368  case -2:
369  case 1:
370  {
371  inputShape = {2, 5, 2};
372 
373  inputData =
374  {
375  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
376 
377  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
378  };
379 
380  outputData =
381  {
382  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
383  0.087144312427294f,
384  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
385  7.246299848982885e-08f,
386 
387  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
388  0.087144312427294f,
389  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
390  7.246299848982885e-08f
391  };
392  break;
393  }
394  case -1:
395  case 2:
396  {
397  inputShape = {2, 2, 5};
398 
399  inputData =
400  {
401  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
402  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
403  };
404 
405  outputData =
406  {
407  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
408  7.246299848982885e-08f,
409  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
410  7.246299848982885e-08f,
411 
412  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
413  7.246299848982885e-08f,
414  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
415  7.246299848982885e-08f
416  };
417  break;
418  }
419  }
420 
421  return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
422  inputShape, outputData, inputData, axis);
423 }
424 
426  armnn::IWorkloadFactory& workloadFactory,
428  float beta)
429 {
430  Simple4dSoftmaxData data;
431  return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
432  data.outputData, data.inputData);
433 }
434 
436  armnn::IWorkloadFactory& workloadFactory,
438  float beta,
439  int axis)
440 {
441  armnn::TensorShape inputShape;
442  std::vector<float> inputData;
443  std::vector<float> outputData;
444  switch (axis)
445  {
446  case -4:
447  case 0:
448  {
449  inputShape = {5, 2, 2, 2};
450 
451  inputData =
452  {
453  17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
454  16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
455  15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
456  14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
457  };
458 
459  outputData =
460  {
461  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
462  0.643914213228014f,
463  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
464  0.236882800924671f,
465  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
466  0.236882800924671f,
467  0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
468  0.087144312427294f,
469 
470  0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
471  0.032058600957022f,
472  0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
473  0.032058600957022f,
474  0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
475  7.246299848982885e-08f,
476  7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
477  7.246299848982885e-08f, 7.246299848982885e-08f
478  };
479  break;
480  }
481  case -3:
482  case 1:
483  {
484  inputShape = {2, 5, 2, 2};
485 
486  inputData =
487  {
488  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
489  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
490  17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
491  15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
492  };
493 
494  outputData =
495  {
496  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
497  0.236882800924671f,
498  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
499  0.087144312427294f,
500  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
501  0.032058600957022f,
502  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
503  7.246299848982885e-08f,
504 
505 
506  0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
507  0.236882800924671f,
508  0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
509  0.087144312427294f,
510  0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
511  0.032058600957022f,
512  0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
513  7.246299848982885e-08f
514  };
515  break;
516  }
517  case -2:
518  case 2:
519  {
520  inputShape = {2, 2, 5, 2};
521 
522  inputData =
523  {
524  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
525  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
526  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
527  17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
528  };
529 
530  outputData =
531  {
532  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
533  0.087144312427294f,
534  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
535  7.246299848982885e-08f,
536  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
537  0.087144312427294f,
538  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
539  7.246299848982885e-08f,
540 
541  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
542  0.087144312427294f,
543  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
544  7.246299848982885e-08f,
545  0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
546  0.087144312427294f,
547  0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
548  7.246299848982885e-08f
549  };
550  break;
551  }
552  case -1:
553  case 3:
554  {
555  inputShape = {2, 2, 2, 5};
556 
557  inputData =
558  {
559  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
560  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
561  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
562  17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
563  };
564 
565  outputData =
566  {
567  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
568  7.246299848982885e-08f,
569  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
570  7.246299848982885e-08f,
571  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
572  7.246299848982885e-08f,
573  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
574  7.246299848982885e-08f,
575 
576  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
577  7.246299848982885e-08f,
578  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
579  7.246299848982885e-08f,
580  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
581  7.246299848982885e-08f,
582  0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
583  7.246299848982885e-08f
584  };
585  break;
586  }
587  }
588 
589  return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(
590  workloadFactory,
591  memoryManager,
592  beta,
593  inputShape,
594  outputData,
595  inputData,
596  axis);
597 }
598 
600  armnn::IWorkloadFactory& workloadFactory,
602  float beta)
603 {
604  return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
605 }
606 
608  armnn::IWorkloadFactory& workloadFactory,
610  float beta)
611 {
612  Simple3dSoftmaxOutputData data;
613  return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
614  workloadFactory,
615  memoryManager,
616  beta,
617  data.inputShape,
618  data.outputData,
619  data.inputData);
620 }
621 
623  armnn::IWorkloadFactory& workloadFactory,
625  float beta)
626 {
627  Simple4dSoftmaxData data;
628 
629  return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
630  data.inputShape, data.outputData, data.inputData);
631 }
632 
634  armnn::IWorkloadFactory& workloadFactory,
636  float beta)
637 {
638  return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta);
639 }
640 
642  armnn::IWorkloadFactory& workloadFactory,
644  float beta)
645 {
646  Simple3dSoftmaxOutputData data;
647  return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
648  data.inputShape, data.outputData, data.inputData);
649 }
650 
652  armnn::IWorkloadFactory& workloadFactory,
654  float beta)
655 {
656  Simple4dSoftmaxData data;
657  return Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
658  data.inputShape, data.outputData, data.inputData);
659 }
660 
662  armnn::IWorkloadFactory& workloadFactory,
664  float beta)
665 {
666  return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
667 }
668 
670  armnn::IWorkloadFactory& workloadFactory,
672  float beta)
673 {
674  Simple3dSoftmaxOutputData data;
675  return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
676  data.inputShape, data.outputData, data.inputData);
677 }
678 
680  armnn::IWorkloadFactory& workloadFactory,
682  float beta)
683 {
684  Simple4dSoftmaxData data;
685 
686  return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
687  data.inputShape, data.outputData, data.inputData);
688 }
689 
691  armnn::IWorkloadFactory& workloadFactory,
693  armnn::IWorkloadFactory& refWorkloadFactory,
694  float beta)
695 {
696  return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
697  workloadFactory, memoryManager, refWorkloadFactory, beta);
698 }
699 
701  armnn::IWorkloadFactory& workloadFactory,
703  armnn::IWorkloadFactory& refWorkloadFactory,
704  float beta)
705 {
706  return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
707  workloadFactory, memoryManager, refWorkloadFactory, beta);
708 }
// --- Doxygen cross-reference residue from the scraped listing (kept for reference, inert as comments) ---
// LayerTestResult< int16_t, 2 > SimpleSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// int m_Axis
// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
// LayerTestResult< uint8_t, 3 > Simple3dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// float m_Beta
// Exponentiation value.
// LayerTestResult< int16_t, 3 > Simple3dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< armnn::Half, 3 > Simple3dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// void IgnoreUnused(Ts &&...)
// LayerTestResult< int16_t, 4 > Simple4dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< uint8_t, 2 > SimpleSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< uint8_t, 4 > Simple4dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< float, 3 > Simple3dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< armnn::Half, 4 > Simple4dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< float, 2 > CompareSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
// std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
// void SetQuantizationScale(float scale)
// Definition: Tensor.cpp:260
// #define ARMNN_ASSERT(COND)
// Definition: Assert.hpp:14
// void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
// virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
// LayerTestResult< float, 3 > Simple3dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
// LayerTestResult< float, 2 > SimpleAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
// virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
// Contains information about inputs and outputs to a layer.
// LayerTestResult< float, 2 > SimpleSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// void SetQuantizationOffset(int32_t offset)
// Definition: Tensor.cpp:276
// LayerTestResult< float, 4 > Simple4dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
// LayerTestResult< float, 4 > Simple4dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// LayerTestResult< uint8_t, 2 > CompareSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
// LayerTestResult< armnn::Half, 2 > SimpleSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
// void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)