// NOTE(review): this file is a lossy extraction — original line numbers are
// embedded in the text and many source lines are absent. Content is kept
// byte-identical; only annotations are added.
//
// Fixture for the simple 3D softmax tests: hard-coded expected softmax
// outputs and the matching 8-element input. The shape member and braces are
// not visible in this fragment — presumably an inputShape field exists
// between these lines; TODO confirm against the full source.
24 struct Simple3dSoftmaxOutputData
26 const std::vector<float> outputData =
28 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
29 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
34 const std::vector<float> inputData =
36 0.0f, 1.0f, 0.0f, 0.0f,
37 0.5f, 0.0f, 0.0f, 0.0f,
// Fixture for the simple 4D softmax tests. The input/expected values are
// identical to the 3D fixture above — presumably only the (not visible)
// shape member differs; TODO confirm.
41 struct Simple4dSoftmaxData
45 const std::vector<float> outputData =
47 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
48 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
51 const std::vector<float> inputData =
53 0.0f, 1.0f, 0.0f, 0.0f,
54 0.5f, 0.0f, 0.0f, 0.0f
// Shared implementation for all Simple*Softmax tests: quantizes the float
// reference data, runs a Softmax workload on the given factory, and stores
// the quantized expected output in the returned LayerTestResult.
// n is the tensor rank (2/3/4); T resolves from the ArmnnType tag.
// (Signature lines are partially missing from this extraction.)
58 template<armnn::DataType ArmnnType, std::
size_t n,
typename T = armnn::ResolveType<ArmnnType>>
64 const std::vector<float>& outputData,
65 const std::vector<float>& inputData,
// Fixed quantization parameters used for both input and expected output
// (scale 1/256, zero offset) — exact softmax range [0,1] maps onto this.
71 const float qScale = 1.f / 256.f;
72 const int qOffset = 0;
// Quantize the float reference input into the target data type T.
88 auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
91 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
92 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
// Axis the softmax is computed over (descriptor parameter; beta is set on
// a line not visible in this fragment).
97 data.m_Parameters.m_Axis = axis;
100 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
101 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
103 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateSoftmax(data, info);
105 inputHandle->Allocate();
106 outputHandle->Allocate();
// Run the workload; result comparison happens in the caller via the
// LayerTestResult populated below.
111 ExecuteWorkload(*workload, memoryManager);
// Quantize the float reference output with the same scale/offset so the
// comparison is apples-to-apples in the target type.
115 std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
116 ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
// 2D softmax test: computes the expected values analytically with the
// numerically-stable form exp((x - max) * beta) / sum, where 1.0 and 0.5 are
// the per-row maxima of the two input rows, then delegates to the rank-2
// base implementation.
121 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Row 0: input {0, 1, 0, 0}, row max 1.0 subtracted before exp.
130 float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
131 exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
132 float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
// Row 1: input {0.5, 0, 0, 0}, row max 0.5 subtracted before exp.
133 float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
134 exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
135 float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
// Normalize each row to obtain the expected softmax distribution.
137 const std::vector<float> outputData = { x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
138 x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 };
140 const std::vector<float> inputData =
146 return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
147 inputShape, outputData, inputData);
// Axis-parameterised 2D softmax test impl: selects hard-coded input/expected
// data depending on the requested axis (branching lines are missing from
// this extraction), then delegates to the rank-2 base implementation.
150 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
158 std::vector<float> inputData;
159 std::vector<float> outputData;
// Presumably the axis-over-rows case: pairs (17,-1), (16,-2), ... softmaxed
// down the first dimension — TODO confirm; the surrounding branch is missing.
169 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
174 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
176 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
177 7.246299848982885e-08f
// Presumably the axis-over-columns case: each row {17,16,15,14,1} /
// {-1,-2,-3,-4,-17} yields the same 5-way softmax distribution.
188 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
193 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
194 7.246299848982885e-08f,
195 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
196 7.246299848982885e-08f
201 return SimpleSoftmaxBaseTestImpl<ArmnnType, 2>(workloadFactory, memoryManager, beta,
202 inputShape, outputData, inputData, axis);
// Thin rank-3 wrapper: forwards the caller-supplied shape/data/axis to the
// shared base implementation with n = 3.
205 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
211 const std::vector<float>& outputData,
212 const std::vector<float>& inputData,
215 return SimpleSoftmaxBaseTestImpl<ArmnnType, 3>(workloadFactory, memoryManager, beta,
216 inputShape, outputData, inputData, axis);
// Thin rank-4 wrapper: identical to the 3D wrapper above except n = 4.
219 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
225 const std::vector<float>& outputData,
226 const std::vector<float>& inputData,
230 return SimpleSoftmaxBaseTestImpl<ArmnnType, 4>(workloadFactory, memoryManager, beta,
231 inputShape, outputData, inputData, axis);
// Comparison test: runs the same softmax over a fixed-seed random 20x30
// input on both the factory under test and a reference factory, so the two
// outputs can be compared (the comparison/copy-back lines are missing from
// this extraction).
234 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
242 const int batchSize = 20;
243 const int channels = 30;
248 unsigned int inputShape[] = { batchSize, channels };
252 float qScale = 1.f / 256.f;
// Deterministic pseudo-random input (seed 0xF00D) in [0, 1].
261 auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
// Handles for the backend under test.
264 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
265 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
272 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
273 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Parallel handles/descriptor for the reference backend.
276 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.
CreateTensorHandle(outputTensorInfo);
277 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.
CreateTensorHandle(inputTensorInfo);
282 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
283 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
285 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateSoftmax(data, info);
286 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateSoftmax(refData, refInfo);
288 outputHandleRef->Allocate();
289 inputHandleRef->Allocate();
291 inputHandle->Allocate();
292 outputHandle->Allocate();
// Execute on the backend under test, then on the reference backend.
297 ExecuteWorkload(*workload, memoryManager);
299 workloadRef->Execute();
// Public Float32 entry points, each a one-line dispatch to the templated impl.
314 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
// NOTE(review): this call passes `axis` to SimpleSoftmaxTestImpl, whose
// visible definition above takes no axis — presumably the extraction dropped
// part of the name (e.g. SimpleAxisSoftmaxTestImpl); verify against the full
// source before relying on this line.
323 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
// 3D Float32 test driven by the shared fixture struct.
331 Simple3dSoftmaxOutputData data;
332 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
333 data.inputShape, data.outputData, data.inputData);
// 3D axis softmax test: picks shape and hard-coded input/expected data per
// requested axis (the switch/if structure is missing from this extraction),
// then dispatches to the 3D impl. The same five logits {17,16,15,14,1} and
// {-1,-2,-3,-4,-17} recur, arranged along whichever dimension is softmaxed.
343 std::vector<float> inputData;
344 std::vector<float> outputData;
// Axis over dim 0 (presumably axis 0 / -3 — branch header not visible).
350 inputShape = {5, 2, 2};
354 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
356 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
361 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
363 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
366 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
368 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
369 7.246299848982885e-08f
// Axis over dim 1 (presumably axis 1 / -2).
376 inputShape = {2, 5, 2};
380 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
382 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
387 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
389 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
390 7.246299848982885e-08f,
392 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
394 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
395 7.246299848982885e-08f
// Axis over dim 2 (presumably axis 2 / -1): four identical 5-way rows.
402 inputShape = {2, 2, 5};
406 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
407 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
412 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
413 7.246299848982885e-08f,
414 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
415 7.246299848982885e-08f,
417 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
418 7.246299848982885e-08f,
419 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
420 7.246299848982885e-08f
426 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
427 inputShape, outputData, inputData, axis);
// 4D Float32 test driven by the shared Simple4dSoftmaxData fixture.
435 Simple4dSoftmaxData data;
436 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
437 data.outputData, data.inputData);
// 4D axis softmax test: same pattern as the 3D axis test — the softmaxed
// dimension has extent 5 (logits {17,16,15,14,1} / {-1,-2,-3,-4,-17}) and is
// moved through each position of the shape; selection branches are missing
// from this extraction.
447 std::vector<float> inputData;
448 std::vector<float> outputData;
// Axis over dim 0 (presumably axis 0 / -4).
454 inputShape = {5, 2, 2, 2};
458 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
459 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
460 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
461 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
466 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
468 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
470 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
472 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
475 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
477 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
479 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
480 7.246299848982885e-08f,
481 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
482 7.246299848982885e-08f, 7.246299848982885e-08f
// Axis over dim 1 (presumably axis 1 / -3).
489 inputShape = {2, 5, 2, 2};
493 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
494 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
495 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
496 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
501 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
503 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
505 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
507 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
508 7.246299848982885e-08f,
511 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
513 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
515 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
517 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
518 7.246299848982885e-08f
// Axis over dim 2 (presumably axis 2 / -2).
525 inputShape = {2, 2, 5, 2};
529 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
530 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
531 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
532 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
537 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
539 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
540 7.246299848982885e-08f,
541 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
543 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
544 7.246299848982885e-08f,
546 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
548 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
549 7.246299848982885e-08f,
550 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
552 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
553 7.246299848982885e-08f
// Axis over dim 3 (presumably axis 3 / -1): eight identical 5-way rows.
560 inputShape = {2, 2, 2, 5};
564 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
565 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
566 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
567 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
572 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
573 7.246299848982885e-08f,
574 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
575 7.246299848982885e-08f,
576 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
577 7.246299848982885e-08f,
578 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
579 7.246299848982885e-08f,
581 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
582 7.246299848982885e-08f,
583 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
584 7.246299848982885e-08f,
585 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
586 7.246299848982885e-08f,
587 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
588 7.246299848982885e-08f
// Dispatch to the 4D impl (argument lines are missing from this extraction).
594 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(
// Per-datatype public entry points: each instantiates the matching templated
// impl for QAsymmU8, Float16 or QSymmS16, reusing the shared fixtures.
609 return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
617 Simple3dSoftmaxOutputData data;
618 return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
632 Simple4dSoftmaxData data;
634 return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
635 data.inputShape, data.outputData, data.inputData);
643 return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta);
651 Simple3dSoftmaxOutputData data;
652 return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
653 data.inputShape, data.outputData, data.inputData);
661 Simple4dSoftmaxData data;
662 return Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta,
663 data.inputShape, data.outputData, data.inputData);
671 return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
679 Simple3dSoftmaxOutputData data;
680 return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
681 data.inputShape, data.outputData, data.inputData);
689 Simple4dSoftmaxData data;
691 return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
692 data.inputShape, data.outputData, data.inputData);
// Backend-vs-reference comparison entry points (Float32 and QAsymmU8).
701 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
702 workloadFactory, memoryManager, refWorkloadFactory, beta);
711 return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
712 workloadFactory, memoryManager, refWorkloadFactory, beta);
LayerTestResult< int16_t, 2 > SimpleSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< uint8_t, 3 > Simple3dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
float m_Beta
Exponentiation value.
LayerTestResult< int16_t, 3 > Simple3dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< armnn::Half, 3 > Simple3dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< int16_t, 4 > Simple4dSoftmaxUint16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 2 > SimpleSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 4 > Simple4dSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< float, 3 > Simple3dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
#define ARMNN_NO_DEPRECATE_WARN_END
LayerTestResult< armnn::Half, 4 > Simple4dSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< float, 2 > CompareSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
#define ARMNN_ASSERT(COND)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 3 > Simple3dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
LayerTestResult< float, 2 > SimpleAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 2 > SimpleSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > Simple4dAxisSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta, int axis)
LayerTestResult< float, 4 > Simple4dSoftmaxTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
LayerTestResult< uint8_t, 2 > CompareSoftmaxUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float beta)
LayerTestResult< armnn::Half, 2 > SimpleSoftmaxFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float beta)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)