template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(/* ... */)
{
    return workloadFactory.CreateAddition(descriptor, info);
}
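
// AdditionTest: element-wise addition of two Float32 tensors of shape { 2, 2, 2, 3 },
// checked against a hard-coded expected output via ElementwiseTestHelper.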
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };
    std::vector<float> input1 = { /* ... */ };

    std::vector<float> input2 = { /* ... */ };

    std::vector<float> output { /* ... */ };
    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory, memoryManager, shape, input1, shape, input2, shape, output);
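
// Addition5dTest: element-wise addition of two Float32 tensors with the 5D shape
// { 2, 2, 2, 2, 3 } (depth, batchSize, channels, height, width).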
    unsigned int depth     = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };
    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,

        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };
    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,

        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };
    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,

        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };
    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory, memoryManager, shape, input1, shape, input2, shape, output);
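
// AdditionBroadcastTestImpl: adds two input tensors of different shapes so that the smaller
// one is broadcast across the other; qScale/qOffset are applied to all three TensorInfos
// when T is a quantized type.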
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset)
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(/* ... */));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(/* ... */));

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(/* ... */));
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
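
    // PostAllocationConfigure() runs after the tensor handles have been allocated and before Execute().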
    workload->PostAllocationConfigure();
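
// AdditionBroadcast1ElementTestImpl: the same pattern as AdditionBroadcastTestImpl, but the
// second input is a single-element tensor that is broadcast against every element of the first.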
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset)
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(/* ... */));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(/* ... */));

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(/* ... */));
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    workload->PostAllocationConfigure();
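
// The public broadcast tests below instantiate the two helpers above for each supported data
// type, passing a quantization scale and offset that only matter for the quantized types.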
// AdditionBroadcastTest (Float32)
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);

// AdditionBroadcastUint8Test
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0);

// AdditionBroadcastInt16Test
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0);

// AdditionBroadcastInt32Test
    return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0);

// AdditionBroadcast1ElementTest (Float32)
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);

// AdditionBroadcast1ElementUint8Test
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128);

// AdditionBroadcast1ElementInt16Test
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0);

// AdditionBroadcast1ElementInt32Test
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0);
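
// AdditionUint8Test: QAsymmU8 addition of two 1x2x2x3 tensors given as raw quantized values;
// the expected output accounts for the tensors' quantization parameters and saturates at 255
// where the true sum falls outside the representable range.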
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63, 35, 77, 70, 56, 112,
        203, 28, 252, 168, 245, 91
    });

    std::vector<uint8_t> input1(
    {
        21, 7, 175, 231, 175, 210,
        126, 161, 63, 21, 105, 126
    });

    std::vector<uint8_t> output(
    {
        81, 39, 249, 255, 228, 255,
        255, 186, 255, 186, 255, 214,
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        /* ... */);
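
// AdditionInt16Test and AdditionInt32Test repeat the same element-wise sums on QSymmS16 and
// Signed32 tensors; their expected outputs are the exact sums, with no saturation.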
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63, 35, 77, 70, 56, 112,
        203, 28, 252, 168, 245, 91
    };

    std::vector<int16_t> input1 =
    {
        21, 7, 175, 231, 175, 210,
        126, 161, 63, 21, 105, 126
    };

    std::vector<int16_t> output =
    {
        84, 42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        /* ... */);
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
        63, 35, 77, 70, 56, 112,
        203, 28, 252, 168, 245, 91
    };

    std::vector<int32_t> input1 =
    {
        21, 7, 175, 231, 175, 210,
        126, 161, 63, 21, 105, 126
    };

    std::vector<int32_t> output =
    {
        84, 42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
        /* ... */);
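
// AdditionAfterMaxPoolTest: builds and runs a max-pooling workload first, then feeds the
// pooling output tensor handle in as one input of an Addition workload and checks its result.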
    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo, /* ... */);

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;

    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);
    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo, /* ... */);

    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(/* ... */));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    // The pooling output handle is wired in as the first input of the addition workload.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    // Run the pooling workload first, then the addition workload that consumes its output.
    workload->PostAllocationConfigure();
    workload->Execute();

    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();
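
// CompareAdditionTest: builds the same Addition workload with the backend under test and with
// refWorkloadFactory, runs both on identical randomly generated inputs, and returns both
// outputs so they can be compared.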
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    unsigned int shape[] = { batchSize, channels, height, width };
    // The integer arguments are fixed RNG seeds, so the generated test data is deterministic.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload reuses the same descriptor and info, rebound to the reference handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();
    workload->PostAllocationConfigure();
    workload->Execute();

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
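
// Usage sketch (an assumption, not part of this file): backend unit-test suites typically
// register these helpers through the ARMNN_AUTO_TEST_CASE macro, along the lines of:
//
//     ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
//     ARMNN_AUTO_TEST_CASE(Add5d, Addition5dTest)
//     ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest)
//
// The test-case names here are illustrative; each backend chooses its own.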