template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}
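
// AdditionTest: element-wise addition of two Float32 tensors with identical
// 4-D shapes. Tensor setup, workload execution and result checking are
// delegated to ElementwiseTestHelper.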
LayerTestResult<float, 4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        /* ... */
    };

    std::vector<float> input2 =
    {
        /* ... */
    };

    std::vector<float> output
    {
        /* ... */
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory, memoryManager, shape, input1, shape, input2, shape, output);
}
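
// Addition5dTest: the same element-wise addition exercised on 5-D tensors,
// checking that addition workloads handle more than four dimensions. Every
// expected value below is the plain sum of the corresponding entries of
// input1 and input2 (e.g. 2.6f + 4.4f = 7.0f, 4.5f + 4.3f = 8.8f).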
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int depth     = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,

        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,

        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,

        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory, memoryManager, shape, input1, shape, input2, shape, output);
}
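
// AdditionBroadcastTestImpl: adds two tensors whose shapes differ, so the
// smaller input is broadcast across the other. When T is a quantized type,
// the same scale and offset are applied to both inputs and the output, so
// the expected results can be computed directly on the dequantized values.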
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    boost::ignore_unused(memoryManager);

    /* ... (tensor infos elided) ... */

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        /* ... */));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        /* ... */));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        /* ... */));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
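
// AdditionBroadcast1ElementTestImpl: the extreme broadcast case, where the
// second input holds a single element that is added to every element of the
// first input. The structure mirrors AdditionBroadcastTestImpl above.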
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    boost::ignore_unused(memoryManager);

    /* ... (tensor infos elided) ... */

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        /* ... */));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        /* ... */));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        /* ... */));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
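
// The wrappers below instantiate the two broadcast implementations for each
// supported data type. The Float32 variants pass a scale of 0.0f and an
// offset of 0 (quantization parameters are ignored for float), while the
// quantized variants pass a type-appropriate scale and offset.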
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0);
}
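
// AdditionUint8Test: addition on QAsymmU8 data. Several of the expected
// outputs sit at 255 because the requantized sums exceed the maximum value
// representable in an unsigned 8-bit quantized tensor and are clamped.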
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
         63,  35,  77,  70,  56, 112,
        203,  28, 252, 168, 245,  91
    });

    std::vector<uint8_t> input1(
    {
         21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    });

    std::vector<uint8_t> output(
    {
         81,  39, 249, 255, 228, 255,
        255, 186, 255, 186, 255, 214
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        /* ... */);
}
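
// AdditionInt16Test: addition on QSymmS16 data using the same raw input
// values as the Uint8 test above. Here the expected outputs are simply the
// element-wise sums (e.g. 70 + 231 = 301); int16 has enough range that no
// clamping occurs.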
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
         63,  35,  77,  70,  56, 112,
        203,  28, 252, 168, 245,  91
    };

    std::vector<int16_t> input1 =
    {
         21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    };

    std::vector<int16_t> output =
    {
         84,  42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        /* ... */);
}
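
// AdditionAfterMaxPoolTest: chains two workloads, a 2-D max pooling followed
// by an addition that consumes the pooling result directly through the shared
// poolingOutputHandle. Each workload is configured and executed in turn.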
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    boost::ignore_unused(memoryManager);

    /* ... (pooling tensor infos and input values elided) ... */

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
        std::vector<float>(/* ... */));

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    /* ... (Pooling2dQueueDescriptor/WorkloadInfo setup elided) ... */

    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    /* ... (addition tensor infos and input values elided) ... */

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
        std::vector<float>(/* ... */));

    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        /* ... */));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    /* ... (input data copies elided) ... */

    workload->PostAllocationConfigure();
    workload->Execute();

    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    /* ... (result copy elided) ... */

    return addRet;
}
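
// CompareAdditionTest: runs the same addition, with randomly generated
// inputs, on both the backend under test and a reference workload factory.
// The backend result goes into ret.output and the reference result into
// ret.outputExpected, so the caller can compare the two.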
LayerTestResult<float, 4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    boost::ignore_unused(memoryManager);

    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = { batchSize, channels, height, width };

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    /* ... (input data copies elided) ... */

    workload->PostAllocationConfigure();
    workload->Execute();

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    /* ... (output copies elided) ... */

    return ret;
}