// NOTE(review): this is an excerpted listing — the original source lines between
// the numbered lines below are elided. Code is left byte-identical; only comments added.
// Explicit CreateWorkload specialization for AdditionQueueDescriptor (only the
// signature is visible in this excerpt).
14 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
// AdditionTest: element-wise Float32 addition over two 4-D tensors of shape
// { batchSize, channels, height, width } = { 2, 2, 2, 3 }.
27 unsigned int batchSize = 2u;
28 unsigned int channels = 2u;
29 unsigned int height = 2u;
30 unsigned int width = 3u;
32 unsigned int shape[] = { batchSize, channels, height, width };
// Input payloads and the precomputed expected output (initializer contents
// elided in this excerpt; presumably output[i] == input1[i] + input2[i]).
34 std::vector<float> input1 =
49 std::vector<float> input2 =
65 std::vector<float> output
// Delegates to the shared element-wise test helper, instantiated for a 4-D
// Float32 addition workload.
80 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
// Addition5dTest: element-wise Float32 addition over two 5-D tensors of shape
// { depth, batchSize, channels, height, width } = { 2, 2, 2, 2, 3 }.
// (Excerpted listing: braces/blank lines between the numbered lines are elided.)
97 unsigned int depth = 2u;
98 unsigned int batchSize = 2u;
99 unsigned int channels = 2u;
100 unsigned int height = 2u;
101 unsigned int width = 3u;
103 unsigned int shape[] = { depth, batchSize, channels, height, width };
105 std::vector<float> input1 =
107 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
110 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
114 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
117 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
122 std::vector<float> input2 =
124 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
127 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
131 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
134 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
// Expected output is the element-wise sum of the two inputs
// (e.g. 2.6f + 4.4f == 7.0f, 4.0f + 3.0f == 7.0f).
138 std::vector<float> output =
140 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
143 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
147 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
150 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
// Same shared helper as the 4-D case, instantiated for 5 dimensions.
154 return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
163 tensorHandleFactory);
// AdditionBroadcastTestImpl: templated helper that runs an Addition workload
// with two differently-shaped inputs so the backend must broadcast one of them.
// The concrete input/output shapes and data are elided in this excerpt.
// T resolves to the C++ storage type for the given ArmnnType.
166 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// For quantized data types, give all tensors the caller-supplied scale/offset
// (the matching SetQuantization* calls for inputTensorInfo1 are elided here —
// presumably set identically; TODO confirm against the full source).
179 if (armnn::IsQuantizedType<T>())
183 inputTensorInfo2.SetQuantizationScale(qScale);
184 inputTensorInfo2.SetQuantizationOffset(qOffset);
185 outputTensorInfo.SetQuantizationScale(qScale);
186 outputTensorInfo.SetQuantizationOffset(qOffset);
// Quantize the float reference data into T using the same qScale/qOffset.
189 auto input1 = armnnUtils::QuantizedVector<T>(
202 auto input2 = armnnUtils::QuantizedVector<T>(
209 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
211 auto expectedOutput = armnnUtils::QuantizedVector<T>(
// Create backend tensor handles for both inputs and the output.
224 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
225 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
226 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the handles into the queue descriptor / workload info.
230 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
231 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
232 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
237 inputHandle1->Allocate();
238 inputHandle2->Allocate();
239 outputHandle->Allocate();
// Configure after allocation, then (elided) execute and read the result back.
244 workload->PostAllocationConfigure();
// Result comparison uses the output handle's shape vs the tensor-info shape.
251 outputHandle->GetShape(),
252 outputTensorInfo.GetShape());
// AdditionBroadcast1ElementTestImpl: same structure as AdditionBroadcastTestImpl,
// but (per its name) the second input is presumably a single-element tensor that
// gets broadcast across the first — shapes are elided in this excerpt.
255 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Apply the caller-supplied quantization parameters for quantized types
// (inputTensorInfo1's SetQuantization* lines are elided here).
268 if (armnn::IsQuantizedType<T>())
272 inputTensorInfo2.SetQuantizationScale(qScale);
273 inputTensorInfo2.SetQuantizationOffset(qOffset);
274 outputTensorInfo.SetQuantizationScale(qScale);
275 outputTensorInfo.SetQuantizationOffset(qOffset);
// Quantize float reference data into the storage type T.
278 auto input1 = armnnUtils::QuantizedVector<T>(
289 auto input2 = armnnUtils::QuantizedVector<T>(
295 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
297 auto expectedOutput = armnnUtils::QuantizedVector<T>(
// Backend tensor handles for the two inputs and the output.
308 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
309 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
310 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
314 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
315 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
316 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
321 inputHandle1->Allocate();
322 inputHandle2->Allocate();
323 outputHandle->Allocate();
328 workload->PostAllocationConfigure();
// Verify the produced shape against the declared output tensor info.
335 outputHandle->GetShape(),
336 outputTensorInfo.GetShape());
// Typed wrapper tests: each forwards to one of the two broadcast impl templates
// with a data type plus (qScale, qOffset). Float32 uses (0.0f, 0) since no
// quantization applies; quantized types pass non-trivial scales.
344 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
345 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
353 return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
354 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
362 return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
363 workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
371 return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
372 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
// 1-element-broadcast variants of the same four data types. Note the QAsymmU8
// case uses scale 0.1333333f with offset 128 (asymmetric), while QSymmS16 uses
// the same scale with offset 0 (symmetric), as required by those types.
380 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
381 workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
389 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
390 workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
398 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
399 workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
407 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
408 workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
// AdditionUint8Test: QAsymmU8 addition on two [1,2,2,3] tensors.
// The quantization parameters are elided in this excerpt, but the expected
// values are consistent with all three tensors sharing offset 3:
// e.g. (63-3) + (21-3) + 3 == 81, and (70-3) + (231-3) + 3 == 298, which
// saturates to 255 (uint8 max) — TODO confirm against the full source.
416 const unsigned int shape0[] = { 1, 2, 2, 3 };
417 const unsigned int shape1[] = { 1, 2, 2, 3 };
419 std::vector<uint8_t> input0(
421 63, 35, 77, 70, 56, 112,
422 203, 28, 252, 168, 245, 91
425 std::vector<uint8_t> input1(
427 21, 7, 175, 231, 175, 210,
428 126, 161, 63, 21, 105, 126
// Precomputed expected output, clamped to the uint8 range [0, 255].
431 std::vector<uint8_t> output(
433 81, 39, 249, 255, 228, 255,
434 255, 186, 255, 186, 255, 214,
437 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
// AdditionInt16Test: QSymmS16 addition on two [1,2,2,3] tensors. The expected
// values are the plain element-wise sums (63 + 21 == 84), so no saturation or
// offset correction occurs in the int16 range for this data.
460 const unsigned int shape0[] = { 1, 2, 2, 3 };
461 const unsigned int shape1[] = { 1, 2, 2, 3 };
463 std::vector<int16_t> input0 =
465 63, 35, 77, 70, 56, 112,
466 203, 28, 252, 168, 245, 91
469 std::vector<int16_t> input1 =
471 21, 7, 175, 231, 175, 210,
472 126, 161, 63, 21, 105, 126
475 std::vector<int16_t> output =
477 84, 42, 252, 301, 231, 322,
478 329, 189, 315, 189, 350, 217,
481 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
// AdditionInt32Test: Signed32 addition on two [1,2,2,3] tensors. Same inputs
// and expected sums as the int16 variant above (63 + 21 == 84, etc.).
504 const unsigned int shape0[] = { 1, 2, 2, 3 };
505 const unsigned int shape1[] = { 1, 2, 2, 3 };
507 std::vector<int32_t> input0 =
509 63, 35, 77, 70, 56, 112,
510 203, 28, 252, 168, 245, 91
513 std::vector<int32_t> input1 =
515 21, 7, 175, 231, 175, 210,
516 126, 161, 63, 21, 105, 126
519 std::vector<int32_t> output =
521 84, 42, 252, 301, 231, 322,
522 329, 189, 315, 189, 350, 217,
525 return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
// AdditionAfterMaxPoolTest: chains two workloads — a max-pooling workload whose
// output tensor is then fed as the first input of an Addition workload.
// Pooling descriptor/tensor-info setup is elided in this excerpt.
558 std::vector<float> poolingInput = {1, 2, 3,
562 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
564 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
// Wire the pooling workload's input/output handles.
581 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
582 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
589 std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
// Second input for the Addition stage, and the precomputed expected result of
// (max-pool output + addInput). Initializer tails are elided in this excerpt.
599 std::vector<float> addInput = { 12, 16,
603 std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
604 std::vector<float> expectedOutput = { 13, 19,
607 std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.
CreateTensorHandle(addInputTensorInfo);
608 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.
CreateTensorHandle(addOutputTensorInfo);
// The Addition workload reuses poolingOutputHandle directly as its first input,
// so the pooled result never leaves the backend between the two workloads.
614 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
615 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
616 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
621 poolingInputHandle->Allocate();
622 poolingOutputHandle->Allocate();
623 addInputHandle->Allocate();
624 addOutputHandle->Allocate();
// Run pooling first (its Execute call is elided here), then the addition.
632 workload->PostAllocationConfigure();
634 addWorkload->PostAllocationConfigure();
635 addWorkload->Execute();
// Final comparison of the addition output shape against its tensor info.
641 addOutputHandle->GetShape(),
642 addOutputTensorInfo.GetShape());
// CompareAdditionTest: runs the same Addition workload on the backend under
// test and on the reference backend, with identical pseudo-random inputs, and
// (in elided code) compares the two outputs. Shape is [4, 1, 2, 3].
653 unsigned int batchSize = 4;
654 unsigned int channels = 1;
655 unsigned int height = 2;
656 unsigned int width = 3;
661 unsigned int shape[] = {batchSize, channels, height, width};
// Deterministic random inputs: the integer arguments (1232, 456) are RNG seeds,
// so both backends consume exactly the same data on every run.
667 auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
668 auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);
670 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
671 std::vector<float> expectedOutput(outputTensorInfo.
GetNumElements());
// Handles for the backend under test...
673 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
674 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
675 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// ...and a parallel set from the reference backend's handle factory.
677 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo1);
678 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo2);
679 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
683 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
684 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
685 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// The reference descriptor reuses the same slot indices (0, 1 inputs; 0 output).
689 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
690 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
691 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
698 inputHandle1->Allocate();
699 inputHandle2->Allocate();
700 outputHandle->Allocate();
701 inputHandle1Ref->Allocate();
702 inputHandle2Ref->Allocate();
703 outputHandleRef->Allocate();
// Execute both workloads (Execute calls partly elided in this excerpt).
710 workload->PostAllocationConfigure();
712 workloadRef->PostAllocationConfigure();
713 workloadRef->Execute();
720 outputHandle->GetShape(),
const TensorShape & GetShape() const
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > AdditionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > CompareAdditionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
uint32_t m_PoolHeight
Pooling height value.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< T, 4 > AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
LayerTestResult< uint8_t, 4 > AdditionUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > AdditionBroadcastTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< float, 4 > AdditionBroadcastTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< int16_t, 4 > AdditionInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > Addition5dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int32_t, 4 > AdditionInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int GetNumElements() const
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)