std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(/* ... */)
{
    return workloadFactory.CreateAddition(descriptor, info);
}
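// Note: the tests below go through the shared ElementwiseTestHelper, which presumably dispatches
// workload creation through the CreateWorkload<armnn::AdditionQueueDescriptor> specialisation
// above, i.e. ends up in IWorkloadFactory::CreateAddition.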
LayerTestResult<float, 4> AdditionTest(/* ... */)
{
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 = { /* ... */ };
    std::vector<float> input2 = { /* ... */ };
    std::vector<float> output = { /* ... */ };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        /* ... */);
}
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int depth     = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,

        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,

        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,

        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        /* ... */
        tensorHandleFactory);
}
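// In Addition5dTest above, shape is { depth, batchSize, channels, height, width } = { 2, 2, 2, 2, 3 },
// i.e. 48 elements per tensor, and the expected 'output' is simply the element-wise sum of 'input1'
// and 'input2' (e.g. 2.6f + 4.4f = 7.0f for the first element).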
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // ...

    if (armnn::IsQuantizedType<T>())
    {
        // ...
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(/* ... */);

    auto input2 = armnnUtils::QuantizedVector<T>(/* ... */);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(/* ... */);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // ...
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    // ...
    workload->PostAllocationConfigure();
    // ...

    return LayerTestResult<T, 4>(/* ... */
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
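// Note: 'data' (the armnn::AdditionQueueDescriptor) and 'info' (the armnn::WorkloadInfo) used by
// AddInputToWorkload/AddOutputToWorkload above are presumably declared on lines elided from this
// extract, as are the broadcastable input/output TensorInfos (inputTensorInfo1/2, outputTensorInfo).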
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // ...

    if (armnn::IsQuantizedType<T>())
    {
        // ...
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = armnnUtils::QuantizedVector<T>(/* ... */);

    auto input2 = armnnUtils::QuantizedVector<T>(/* ... */);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    auto expectedOutput = armnnUtils::QuantizedVector<T>(/* ... */);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // ...
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    // ...
    workload->PostAllocationConfigure();
    // ...

    return LayerTestResult<T, 4>(/* ... */
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
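// AdditionBroadcast1ElementTestImpl follows the same pattern as AdditionBroadcastTestImpl; judging
// by its name, the second input is presumably a single-element tensor that is broadcast across the
// first input, but the tensor shapes and data are on elided lines.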
// AdditionBroadcastTest (Float32)
return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
    workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);

// AdditionBroadcastUint8Test (QAsymmU8)
return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
    workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);

// AdditionBroadcastInt16Test (QSymmS16)
return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
    workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);

// AdditionBroadcastInt32Test (Signed32)
return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
    workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);

// AdditionBroadcast1ElementTest (Float32)
return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
    workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);

// AdditionBroadcast1ElementUint8Test (QAsymmU8)
return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
    workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);

// AdditionBroadcast1ElementInt16Test (QSymmS16)
return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
    workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);

// AdditionBroadcast1ElementInt32Test (Signed32)
return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
    workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
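// The quantised variants (QAsymmU8, QSymmS16) reuse the same reference data as the float tests but
// pass a non-trivial qScale/qOffset through to the impls, which apply it to the input and output
// TensorInfos before quantising the data; the Float32 variants pass 0.0f/0, which leaves the data
// unchanged.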
LayerTestResult<uint8_t, 4> AdditionUint8Test(/* ... */)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
         63,  35,  77,  70,  56, 112,
        203,  28, 252, 168, 245,  91
    });

    std::vector<uint8_t> input1(
    {
         21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    });

    std::vector<uint8_t> output(
    {
         81,  39, 249, 255, 228, 255,
        255, 186, 255, 186, 255, 214
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        /* ... */);
}
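// In the QAsymmU8 expected output above, entries whose sum exceeds the representable range are
// clamped to 255 (e.g. 70 + 231 and 203 + 126 both saturate); the quantization scale/offset passed
// to ElementwiseTestHelper are on lines not included here.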
LayerTestResult<int16_t, 4> AdditionInt16Test(/* ... */)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
         63,  35,  77,  70,  56, 112,
        203,  28, 252, 168, 245,  91
    };

    std::vector<int16_t> input1 =
    {
         21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    };

    std::vector<int16_t> output =
    {
         84,  42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        /* ... */);
}
LayerTestResult<int32_t, 4> AdditionInt32Test(/* ... */)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
         63,  35,  77,  70,  56, 112,
        203,  28, 252, 168, 245,  91
    };

    std::vector<int32_t> input1 =
    {
         21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    };

    std::vector<int32_t> output =
    {
         84,  42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
        /* ... */);
}
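// For the QSymmS16 and Signed32 variants above the expected output is simply the element-wise
// integer sum of input0 and input1 (63 + 21 = 84, 35 + 7 = 42, 77 + 175 = 252, ...), with no
// saturation involved.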
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // ...

    std::vector<float> poolingInput = { 1, 2, 3,
                                        /* ... */ };

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle  = /* ... */;
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle = /* ... */;

    // ...
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the pooling workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());

    // ...

    // Second input and expected result for the Addition that follows the pooling.
    std::vector<float> addInput = { 12, 16,
                                    /* ... */ };

    std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput = { 13, 19,
                                          /* ... */ };

    std::unique_ptr<armnn::ITensorHandle> addInputHandle  = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);

    // ...
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    // ...
    workload->PostAllocationConfigure();
    // ...
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    // ...
    return LayerTestResult<float, 4>(/* ... */
                                     addOutputHandle->GetShape(),
                                     addOutputTensorInfo.GetShape());
}
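// The test above chains two workloads: poolingOutputHandle, written by the Pooling2d workload, is
// fed straight back in as the first input of the Addition workload. The expected values are
// consistent with the pooled tensor contributing 1 and 3 to the first two outputs
// (13 - 12 = 1, 19 - 16 = 3); the pooling descriptor itself is configured on elided lines.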
LayerTestResult<float, 4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    // ...
    unsigned int shape[] = { batchSize, channels, height, width };

    // ...
    auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // ...
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // ...
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // ...
    workload->PostAllocationConfigure();
    // ...
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // ...
    return LayerTestResult<float, 4>(/* ... */
                                     outputHandle->GetShape(),
                                     /* ... */);
}
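// CompareAdditionTest runs the same randomly-seeded addition (seeds 1232 and 456) on the backend
// under test and on the reference workload factory; 'expectedOutput' is presumably filled from the
// reference backend's output on elided lines, so the test checks that the two backends agree rather
// than comparing against hard-coded data.
//
// For context, backends typically register these functions as unit tests with a macro along the
// lines of the following (illustrative sketch only; the exact macro name differs between ArmNN
// versions):
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAdd, AdditionTest)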