Diffstat (limited to 'src/backends/backendsCommon/test/LayerTests.cpp')
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp  2684
1 file changed, 0 insertions, 2684 deletions
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 5fd8f3e641..2d71e60ca4 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -2789,1927 +2789,6 @@ LayerTestResult<float,3> ConcatTest(
return ret;
}
-LayerTestResult<float,4> AdditionTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int batchSize = 2;
- unsigned int channels = 2;
- unsigned int height = 2;
- unsigned int width = 3;
-
- armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
- armnn::TensorInfo outputTensorInfo;
-
- unsigned int shape[] = {batchSize, channels, height, width};
-
- inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-
-
- auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
- {
- 0.0f, 2.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
-
- 1.0f, 2.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
-
- 0.0f, 2.0f, 1.0f,
- 4.2f, 1.0f, 2.0f,
-
- 0.0f, 0.0f, 1.0f,
- 0.2f, 1.0f, 2.0f,
- }));
-
- auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
- {
- 1.0f, 2.0f, 1.0f,
- 0.0f, 1.0f, 2.0f,
-
- 1.0f, 2.0f, -2.0f,
- 0.2f, 1.0f, 2.0f,
-
- 0.0f, 2.0f, 1.0f,
- 4.2f, 0.0f, -3.0f,
-
- 0.0f, 0.0f, 1.0f,
- 0.7f, 1.0f, 5.0f,
- }));
-
- LayerTestResult<float,4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
- {
- 1.0f, 4.0f, 2.0f,
- 0.2f, 2.0f, 4.0f,
-
- 2.0f, 4.0f, -1.0f,
- 0.4f, 2.0f, 4.0f,
-
- 0.0f, 4.0f, 2.0f,
- 8.4f, 1.0f, -1.0f,
-
- 0.0f, 0.0f, 2.0f,
- 0.9f, 2.0f, 7.0f,
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
- inputHandle1->Allocate();
- inputHandle2->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-LayerTestResult<float, 5> Addition5dTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int depth = 2;
- unsigned int batchSize = 2;
- unsigned int channels = 2;
- unsigned int height = 2;
- unsigned int width = 3;
-
- armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
- armnn::TensorInfo outputTensorInfo;
-
- unsigned int shape[] = {depth, batchSize, channels, height, width};
-
- inputTensorInfo1 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
- inputTensorInfo2 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
-
-
- auto input1 = MakeTensor<float, 5>(inputTensorInfo1, std::vector<float>(
- {
- 2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
- 2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
-
- 2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
- 0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
-
-
- 1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
- 1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
-
- 0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
- 0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
-
- }));
-
- auto input2 = MakeTensor<float, 5>(inputTensorInfo2, std::vector<float>(
- {
- 4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
- 1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
-
- 4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
- 0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
-
-
- 0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
- 2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
-
- 3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
- 2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
- }));
-
- LayerTestResult<float, 5> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<float, 5>(outputTensorInfo, std::vector<float>(
- {
- 7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
- 4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
-
- 7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
- 0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
-
-
- 1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
- 3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
-
- 4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
- 2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
- inputHandle1->Allocate();
- inputHandle2->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> AdditionBroadcastTestImpl(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- float qScale,
- int32_t qOffset)
-{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo1.SetQuantizationScale(qScale);
- inputTensorInfo1.SetQuantizationOffset(qOffset);
- inputTensorInfo2.SetQuantizationScale(qScale);
- inputTensorInfo2.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
- }
-
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
- {
- 0.0f,
- 1.0f,
-
- 2.0f,
- 3.0f,
-
- 4.0f,
- 5.0f,
- }));
-
- auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
- {
- 0.5f, 1.5f, 2.5f,
- 3.5f, 4.5f, 5.5f,
- }));
-
- LayerTestResult<T,4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
- {
- 0.5f, 1.5f, 2.5f,
- 4.5f, 5.5f, 6.5f,
-
- 2.5f, 3.5f, 4.5f,
- 6.5f, 7.5f, 8.5f,
-
- 4.5f, 5.5f, 6.5f,
- 8.5f, 9.5f, 10.5f,
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
- inputHandle1->Allocate();
- inputHandle2->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- float qScale,
- int32_t qOffset)
-{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
-
- if (armnn::IsQuantizedType<T>())
- {
- inputTensorInfo1.SetQuantizationScale(qScale);
- inputTensorInfo1.SetQuantizationOffset(qOffset);
- inputTensorInfo2.SetQuantizationScale(qScale);
- inputTensorInfo2.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
- }
-
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
- {
- 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f,
- 12.0f, 13.0f, 14.0f,
- 15.0f, 16.0f, 17.0f,
- }));
-
- auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
- {
- 0.5f,
- }));
-
- LayerTestResult<T,4> ret(outputTensorInfo);
- ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
- {
- 0.5f, 1.5f, 2.5f,
- 3.5f, 4.5f, 5.5f,
- 6.5f, 7.5f, 8.5f,
- 9.5f, 10.5f, 11.5f,
- 12.5f, 13.5f, 14.5f,
- 15.5f, 16.5f, 17.5f,
- }));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
- inputHandle1->Allocate();
- inputHandle2->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- return ret;
-}
-
-LayerTestResult<float, 4> AdditionBroadcastTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
- workloadFactory, memoryManager, 2.f, 0);
-}
-
-LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
- workloadFactory, memoryManager, 2.f, 0);
-}
-
-LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
- workloadFactory, memoryManager, 0.1333333f, 128);
-}
-
-LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
- workloadFactory, memoryManager, 0.1333333f, 0);
-}
-
-LayerTestResult<float,4> CompareAdditionTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory)
-{
- unsigned int batchSize = 4;
- unsigned int channels = 1;
- unsigned int height = 2;
- unsigned int width = 3;
-
- armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
- armnn::TensorInfo outputTensorInfo;
-
- unsigned int shape[] = {batchSize, channels, height, width};
-
- inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-
- auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
- auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
-
- LayerTestResult<float,4> ret(outputTensorInfo);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
- std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- armnn::AdditionQueueDescriptor refData = data;
- armnn::WorkloadInfo refInfo = info;
- SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
- SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
- SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
-
- inputHandle1->Allocate();
- inputHandle2->Allocate();
- outputHandle->Allocate();
- inputHandle1Ref->Allocate();
- inputHandle2Ref->Allocate();
- outputHandleRef->Allocate();
-
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
- workloadRef->PostAllocationConfigure();
- workloadRef->Execute();
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
-
- return ret;
-}
-
-namespace {
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> DivisionTestHelper(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[4],
- const std::vector<T>& values0,
- float scale0,
- int32_t offset0,
- const unsigned int shape1[4],
- const std::vector<T> & values1,
- float scale1,
- int32_t offset1,
- const unsigned int outShape[4],
- const std::vector<T> & outValues,
- float outScale,
- int32_t outOffset)
-{
- armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
- armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
-
- inputTensorInfo0.SetQuantizationScale(scale0);
- inputTensorInfo0.SetQuantizationOffset(offset0);
-
- inputTensorInfo1.SetQuantizationScale(scale1);
- inputTensorInfo1.SetQuantizationOffset(offset1);
-
- outputTensorInfo.SetQuantizationScale(outScale);
- outputTensorInfo.SetQuantizationOffset(outOffset);
-
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::DivisionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- return result;
-}
-} // anonymous namespace
-
-LayerTestResult<float,4> DivisionByZeroTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({
- 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
- -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
-
- std::vector<float> input1({
- 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
- 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
-
- std::vector<float> output({
- INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
- -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
-
- return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 1.0f, 0);
-}
-
-LayerTestResult<float,4> DivisionTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<float> input1({
- 1, 1, 1, 1, 2, 2, 2, 2,
- 4, 4, 4, 4, 4, 4, 4, 4 });
-
- std::vector<float> output({
- 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
- 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
-
-
- return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 1.0f, 0);
-}
-
-LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 2 });
-
- std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
-
-
- return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 3, 3, 2 };
- std::vector<float> input0({
- 1, 4, 3, 8, 5, 12,
- 7, 16, 9, 20, 11, 24,
- 13, 28, 15, 32, 17, 36});
-
- unsigned int shape1[] = { 1, 1, 1, 2 };
- std::vector<float> input1({ 1, 2 });
-
- std::vector<float> output({
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18});
-
- return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t,4> DivisionUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
- 4, 4, 4, 4, 4, 4, 4, 4 });
-
- std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
- 4, 4, 4, 4, 5, 5, 5, 5});
-
-
- return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 0.25f, 0);
-}
-
-LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<uint8_t> input1({ 2 });
-
- std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 3, 3, 2 };
- std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
- 7, 16, 9, 20, 11, 24,
- 13, 28, 15, 32, 17, 36});
-
- unsigned int shape1[] = { 1, 1, 1, 2 };
- std::vector<uint8_t> input1({ 1, 2 });
-
- std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18});
-
- return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<int16_t,4> DivisionInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
- 4, 4, 4, 4, 4, 4, 4, 4 });
-
- std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
- 4, 4, 4, 4, 5, 5, 5, 5});
-
-
- return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape, input0, 1.0f, 0,
- shape, input1, 1.0f, 0,
- shape, output, 0.25f, 0);
-}
-
-LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<int16_t> input1({ 2 });
-
- std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 3, 3, 2 };
- std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
- 7, 16, 9, 20, 11, 24,
- 13, 28, 15, 32, 17, 36});
-
- unsigned int shape1[] = { 1, 1, 1, 2 };
- std::vector<int16_t> input1({ 1, 2 });
-
- std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18});
-
- return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-template<typename DescriptorType>
-std::unique_ptr<armnn::IWorkload> CreateWorkload(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const DescriptorType& descriptor)
-{
- return CreateWorkload(workloadFactory, info, descriptor);
-};
-
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const armnn::MaximumQueueDescriptor& descriptor)
-{
- return workloadFactory.CreateMaximum(descriptor, info);
-}
-
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const armnn::MinimumQueueDescriptor& descriptor)
-{
- return workloadFactory.CreateMinimum(descriptor, info);
-}
-
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const armnn::EqualQueueDescriptor& descriptor)
-{
- return workloadFactory.CreateEqual(descriptor, info);
-}
-
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
- const armnn::IWorkloadFactory& workloadFactory,
- const armnn::WorkloadInfo& info,
- const armnn::GreaterQueueDescriptor& descriptor)
-{
- return workloadFactory.CreateGreater(descriptor, info);
-}
-
-namespace {
-
-template <typename Descriptor,
- armnn::DataType ArmnnTypeInput,
- armnn::DataType ArmnnTypeOutput,
- typename TInput = armnn::ResolveType<ArmnnTypeInput>,
- typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
-LayerTestResult<TOutput, 4> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<TInput> values0,
- const unsigned int shape1[4], std::vector<TInput> values1,
- const unsigned int outShape[4], std::vector<TOutput> outValues,
- float qScale = 0.0f, int qOffset = 0)
-{
- const uint32_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
-
- auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
-
- if (armnn::IsQuantizedType<TInput>())
- {
- inputTensorInfo0.SetQuantizationScale(qScale);
- inputTensorInfo0.SetQuantizationOffset(qOffset);
-
- inputTensorInfo1.SetQuantizationScale(qScale);
- inputTensorInfo1.SetQuantizationOffset(qOffset);
-
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
- }
-
- LayerTestResult<TOutput,4> ret(outputTensorInfo);
-
- if(ArmnnTypeOutput == armnn::DataType::Boolean)
- {
- ret.compareBoolean = true;
- }
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- Descriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- ExecuteWorkload(*workload, memoryManager);
-
- CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
- ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
- return ret;
-}
-
-template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
-LayerTestResult<T, 4> ElementwiseTestHelper(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<T> values0,
- const unsigned int shape1[4], std::vector<T> values1,
- const unsigned int outShape[4], std::vector<T> outValues,
- float qScale = 0.0f, int qOffset = 0)
-{
- return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
- (workloadFactory,
- memoryManager,
- shape0,
- values0,
- shape1,
- values1,
- outShape,
- outValues,
- qScale,
- qOffset);
-}
-}
-
-LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
- 5, 5, 5, 5, 4, 4, 4, 4 });
-
- std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 1 });
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
-}
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 1 });
-
- std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<float> input1({ 1, 2, 3});
-
- std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> EqualUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- // See dequantized values to the right.
- std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 7, 7, 7, 7 });
-
- std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
- 3, 3, 3, 3, 5, 5, 5, 5 });
-
- std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
- 1, 1, 1, 1, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({ 1 });
-
- std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({ 1, 1, 3});
-
- std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
- 5, 5, 5, 5, 4, 4, 4, 4 });
-
- std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
- 0, 0, 0, 0, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
-}
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 1 });
-
- std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<float> input1({ 1, 3, 2});
-
- std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1 });
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> GreaterUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- // See dequantized values to the right.
- std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 5, 5, 5, 5 });
-
- std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
- 2, 2, 2, 2, 5, 5, 5, 5 });
-
- std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 0, 0, 0, 0 });
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({ 1 });
-
- std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1 });
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({ 1, 1, 3});
-
- std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1 });
-
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
- armnn::DataType::Boolean>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
-}
-
-LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 2 });
-
- std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<float> input1({ 1, 2, 3});
-
- std::vector<float> output({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> MaximumUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- // See dequantized values to the right.
- std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({2});
-
- std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<uint8_t> input1({ 1, 10, 3});
-
- std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
- 7, 10, 9, 10, 11, 12 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MaximumInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<int16_t> input1({2});
-
- std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<int16_t> input1({ 1, 10, 3});
-
- std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
- 7, 10, 9, 10, 11, 12 });
-
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 2 });
-
- std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 5 });
-
- std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
- 7, 1, 2, 3, 4, 5 });
-
- std::vector<uint8_t> input1({ 1, 2, 3});
-
- std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
- 1, 1, 2, 1, 2, 3 });
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MinimumInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape[] = { 2, 2, 2, 2 };
-
- std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<int16_t> input1({2});
-
- std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2 });
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12 });
-
- std::vector<int16_t> input1({ 1, 10, 3});
-
- std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
- 1, 8, 3, 1, 10, 3 });
-
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
- workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-namespace {
-template<std::size_t NumDims>
-LayerTestResult<float,NumDims> MultiplicationTestHelper(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[NumDims],
- const std::vector<float> & values0,
- const unsigned int shape1[NumDims],
- const std::vector<float> & values1,
- const unsigned int outShape[NumDims],
- const std::vector<float> & outValues)
-{
- armnn::TensorInfo inputTensorInfo0{NumDims, shape0, armnn::DataType::Float32};
- armnn::TensorInfo inputTensorInfo1{NumDims, shape1, armnn::DataType::Float32};
- armnn::TensorInfo outputTensorInfo{NumDims, outShape, armnn::DataType::Float32};
-
- auto input0 = MakeTensor<float, NumDims>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<float, NumDims>(inputTensorInfo1, values1);
-
- LayerTestResult<float,NumDims> ret(outputTensorInfo);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::MultiplicationQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
- CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
-
- ret.outputExpected = MakeTensor<float, NumDims>(outputTensorInfo, outValues);
- return ret;
-}
-} // anonymous namespace
-
-
-LayerTestResult<float,4> MultiplicationTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 2;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
-
- unsigned int shape[] = { batchSize, channelCount, height, width };
-
- std::vector<float> input0({
- 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4 });
-
- std::vector<float> input1({
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 4, 4, 5, 5, 5, 5 });
-
- std::vector<float> output({
- 2, 2, 2, 2, 6, 6, 6, 6,
- 12, 12, 12, 12, 20, 20, 20, 20 });
-
- return MultiplicationTestHelper<4>(workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
-}
-
-LayerTestResult<float,5> Multiplication5dTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int width = 3;
- const unsigned int height = 2;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 2;
- const unsigned int depth = 2;
-
- unsigned int shape[] = { depth, batchSize, channelCount, height, width };
-
- std::vector<float> input0({
- 1.80f, 0.20f, 2.30f, 1.30f, 2.10f, 1.00f,
- 2.60f, 0.60f, 2.10f, 2.30f, 2.30f, 2.00f,
-
- 2.50f, 1.00f, 2.90f, 3.10f, 1.50f, 2.40f,
- 2.80f, 1.10f, 1.00f, 3.20f, 1.00f, 2.30f,
-
-
- 0.30f, 2.20f, 1.00f, 0.20f, 1.60f, 1.40f,
- 0.80f, 3.20f, 0.10f, 0.10f, 3.10f, 2.10f,
-
- 1.50f, 2.40f, 1.40f, 0.70f, 2.40f, 1.40f,
- 1.60f, 1.20f, 1.90f, 0.80f, 0.00f, 0.10f,
- });
-
- std::vector<float> input1({
- 0.70f, 1.00f, 2.90f, 2.20f, 3.10f, 2.80f,
- 1.80f, 2.00f, 0.50f, 2.30f, 1.20f, 2.70f,
-
- 2.40f, 0.20f, 3.20f, 1.60f, 0.20f, 2.50f,
- 2.30f, 0.70f, 2.70f, 1.80f, 2.90f, 2.70f,
-
-
- 3.20f, 3.20f, 0.70f, 1.90f, 2.70f, 2.50f,
- 2.40f, 0.90f, 2.30f, 1.80f, 2.50f, 2.00f,
-
- 1.60f, 2.20f, 1.60f, 2.00f, 0.30f, 3.20f,
- 0.40f, 3.00f, 2.60f, 0.30f, 0.00f, 2.50f,
- });
-
- std::vector<float> output({
- 1.26f, 0.20f, 6.67f, 2.86f, 6.51f, 2.80f,
- 4.68f, 1.20f, 1.05f, 5.29f, 2.76f, 5.40f,
-
- 6.00f, 0.20f, 9.28f, 4.96f, 0.30f, 6.00f,
- 6.44f, 0.77f, 2.70f, 5.76f, 2.90f, 6.21f,
-
-
- 0.96f, 7.04f, 0.70f, 0.38f, 4.32f, 3.50f,
- 1.92f, 2.88f, 0.23f, 0.18f, 7.75f, 4.20f,
-
- 2.40f, 5.28f, 2.24f, 1.40f, 0.72f, 4.48f,
- 0.64f, 3.60f, 4.94f, 0.24f, 0.00f, 0.25f,
- });
-
- return MultiplicationTestHelper<5>(workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
-}
-
-LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 2, 2, 2 };
- std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
- unsigned int shape1[] = { 1, 1, 1, 1 };
- std::vector<float> input1({ 2 });
-
- std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
-
- return MultiplicationTestHelper<4>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int shape0[] = { 1, 3, 3, 2 };
- std::vector<float> input0({
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18});
-
- unsigned int shape1[] = { 1, 1, 1, 2 };
- std::vector<float> input1({ 1, 2 });
-
- std::vector<float> output({
- 1, 4, 3, 8, 5, 12,
- 7, 16, 9, 20, 11, 24,
- 13, 28, 15, 32, 17, 36});
-
- return MultiplicationTestHelper<4>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
-}
-
-LayerTestResult<float,4> CompareMultiplicationTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory)
-{
- const unsigned int width = 16;
- const unsigned int height = 32;
- const unsigned int channelCount = 2;
- const unsigned int batchSize = 5;
-
- armnn::TensorInfo inputTensorInfo0;
- armnn::TensorInfo inputTensorInfo1;
- armnn::TensorInfo outputTensorInfo;
-
- constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
-
- inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-
- LayerTestResult<float,4> comparisonResult(outputTensorInfo);
-
- auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
- auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::MultiplicationQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- armnn::MultiplicationQueueDescriptor refData = data;
- armnn::WorkloadInfo refInfo = info;
- SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
- SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
- SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
- inputHandle0Ref->Allocate();
- inputHandle1Ref->Allocate();
- outputHandleRef->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
- workloadRef->PostAllocationConfigure();
- workloadRef->Execute();
- CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
- CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
-
- return comparisonResult;
-}
-
LayerTestResult<float,4> CompareBatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -8467,664 +6546,6 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
return ret;
}
-namespace
-{
-template <typename T>
-LayerTestResult<T, 4> AdditionQuantizeTestHelper(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[4],
- const std::vector<T>& values0,
- float scale0,
- int32_t offset0,
- const unsigned int shape1[4],
- const std::vector<T>& values1,
- float scale1,
- int32_t offset1,
- const unsigned int outShape[4],
- const std::vector<T>& outValues,
- float outScale,
- int32_t outOffset)
-{
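- // Map T to the quantised data type: uint8_t selects QuantisedAsymm8,
- // anything else (int16_t in practice) selects QuantisedSymm16.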
- auto dataType = (std::is_same<T, uint8_t>::value ?
- armnn::DataType::QuantisedAsymm8 :
- armnn::DataType::QuantisedSymm16);
-
- armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
- armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
-
- inputTensorInfo0.SetQuantizationScale(scale0);
- inputTensorInfo0.SetQuantizationOffset(offset0);
-
- inputTensorInfo1.SetQuantizationScale(scale1);
- inputTensorInfo1.SetQuantizationOffset(offset1);
-
- outputTensorInfo.SetQuantizationScale(outScale);
- outputTensorInfo.SetQuantizationOffset(outOffset);
-
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- return result;
-}
-} // anonymous namespace
-
-LayerTestResult<uint8_t, 4> AdditionUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 2, 2, 3 };
-
- std::vector<uint8_t> input0(
- {
- 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
- 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
- });
-
- std::vector<uint8_t> input1(
- {
- 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
- 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
- });
-
- std::vector<uint8_t> output(
- {
- 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
- 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
- });
-
- return AdditionQuantizeTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 7.0f, 3,
- shape1, input1, 7.0f, 3,
- shape0, output, 7.0f, 3);
-}
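-
-// A minimal sketch of the requantisation arithmetic behind the comments above:
-// dequantise each input with real = scale * (q - offset), add, then requantise
-// and saturate to the QAsymm8 range. RefAddQAsymm8 is a hypothetical helper for
-// illustration only (it assumes <cmath> and <algorithm> are available here);
-// it is not part of the Arm NN API.
-inline uint8_t RefAddQAsymm8(uint8_t a, uint8_t b, float scale, int32_t offset)
-{
-    // All three tensors in AdditionUint8Test share scale 7.0f and offset 3.
-    float sum = scale * (static_cast<float>(a) - static_cast<float>(offset)) +
-                scale * (static_cast<float>(b) - static_cast<float>(offset));
-    int32_t requantised = static_cast<int32_t>(std::round(sum / scale)) + offset;
-    return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, requantised)));
-}
-// e.g. RefAddQAsymm8(63, 21, 7.0f, 3) == 81 and RefAddQAsymm8(70, 231, 7.0f, 3) == 255
-// (saturated), matching the first row of the expected output above.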
-
-LayerTestResult<int16_t, 4> AdditionInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 2, 2, 3 };
-
- std::vector<int16_t> input0(
- {
- 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
- 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
- });
-
- std::vector<int16_t> input1(
- {
- 21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
- 126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
- });
-
- std::vector<int16_t> output(
- {
- 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
- 329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
- // (no saturation here: QSymm16 covers [-32768, 32767])
- });
-
- return AdditionQuantizeTestHelper(workloadFactory,
- memoryManager,
- shape0, input0, 7.0f, 0,
- shape1, input1, 7.0f, 0,
- shape0, output, 7.0f, 0);
-}
-
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[4],
- const std::vector<T> & values0,
- float scale0,
- int32_t offset0,
- const unsigned int shape1[4],
- const std::vector<T> & values1,
- float scale1,
- int32_t offset1,
- const unsigned int outShape[4],
- const std::vector<T> & outValues,
- float outScale,
- int32_t outOffset)
-{
- armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
- armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
-
- inputTensorInfo0.SetQuantizationScale(scale0);
- inputTensorInfo0.SetQuantizationOffset(offset0);
-
- inputTensorInfo1.SetQuantizationScale(scale1);
- inputTensorInfo1.SetQuantizationOffset(offset1);
-
- outputTensorInfo.SetQuantizationScale(outScale);
- outputTensorInfo.SetQuantizationOffset(outOffset);
-
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::MultiplicationQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- return result;
-}
-} // anonymous namespace
-
-LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- unsigned int batchSize = 1;
- unsigned int channels = 2;
- unsigned int height = 2;
- unsigned int width = 3;
- const unsigned int shape[] = { batchSize, channels, height, width };
-
- // See dequantized values to the right.
- std::vector<uint8_t> input0({
- 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
- 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
- });
-
- // See dequantized values to the right.
- std::vector<uint8_t> input1({
- 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
- 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
- });
-
- // See dequantized values to the right.
- std::vector<uint8_t> output(
- {
- 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
- 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
- });
-
- // The output scale/offset are chosen so that some products saturate at 0 or 255.
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape,
- input0,
- 4.0f,
- 1,
- shape,
- input1,
- 3.0f,
- -2,
- shape,
- output,
- 1366.255f,
- -5);
-}
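-
-// A worked example of the mixed-scale requantisation above (first element):
-//   real0 = 4.0f * (62 - 1)     = 244
-//   real1 = 3.0f * (126 - (-2)) = 384
-//   244 * 384                   = 93696
-//   round(93696 / 1366.255f) + (-5) = 69 - 5 = 64, the first expected output value.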
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<uint8_t> input0({
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
- });
-
- std::vector<uint8_t> input1({2});
-
- std::vector<uint8_t> output({
- 2, 4, 6, 8, 10, 12,
- 14, 16, 18, 20, 22, 24
- });
-
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<uint8_t> input0({
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
- });
-
- std::vector<uint8_t> input1({1, 2, 3});
-
- std::vector<uint8_t> output({
- 1, 4, 9, 4, 10, 18,
- 7, 16, 27, 10, 22, 36
- });
-
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MultiplicationInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape[] = { 1, 2, 2, 3 };
-
- std::vector<int16_t> input0(
- {
- 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17
- });
-
- std::vector<int16_t> input1(
- {
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
- });
-
- std::vector<int16_t> output(
- {
- 6, 14, 24, 36, 50, 66,
- 84, 104, 126, 150, 176, 204
- });
-
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape,
- input0,
- 1.0f,
- 0,
- shape,
- input1,
- 1.0f,
- 0,
- shape,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<int16_t> input0(
- {
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
- });
-
- std::vector<int16_t> input1({2});
-
- std::vector<int16_t> output(
- {
- 2, 4, 6, 8, 10, 12,
- 14, 16, 18, 20, 22, 24
- });
-
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 2, 2, 3 };
- const unsigned int shape1[] = { 1, 1, 1, 3 };
-
- std::vector<int16_t> input0(
- {
- 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12
- });
-
- std::vector<int16_t> input1({1, 2, 3});
-
- std::vector<int16_t> output(
- {
- 1, 4, 9, 4, 10, 18,
- 7, 16, 27, 10, 22, 36
- });
-
- return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- 1.0f,
- 0,
- shape1,
- input1,
- 1.0f,
- 0,
- shape0,
- output,
- 1.0f,
- 0);
-}
-
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> SubtractionTestHelper(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const unsigned int shape0[4],
- const std::vector<T>& values0,
- float scale0,
- int32_t offset0,
- const unsigned int shape1[4],
- const std::vector<T>& values1,
- float scale1,
- int32_t offset1,
- const unsigned int outShape[4],
- const std::vector<T>& outValues,
- float outScale,
- int32_t outOffset)
-{
- armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
- armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
- armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
-
- inputTensorInfo0.SetQuantizationScale(scale0);
- inputTensorInfo0.SetQuantizationOffset(offset0);
-
- inputTensorInfo1.SetQuantizationScale(scale1);
- inputTensorInfo1.SetQuantizationOffset(offset1);
-
- outputTensorInfo.SetQuantizationScale(outScale);
- outputTensorInfo.SetQuantizationOffset(outOffset);
-
- auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
- LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::SubtractionQueueDescriptor data;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
- AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
-
- inputHandle0->Allocate();
- inputHandle1->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
- CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
- return result;
-}
-} // anonymous namespace
-
-LayerTestResult<uint8_t, 4> SubtractionUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 2, 2 };
-
- std::vector<uint8_t> input0({ 10, 12, 14, 16 });
- std::vector<uint8_t> input1({ 1, 2, 1, 2 });
- std::vector<uint8_t> output({ 3, 3, 5, 5 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 2,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
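-
-// Worked example for the first element above, where the two inputs use different
-// quantisation parameters:
-//   real0 = 0.5f * (10 - 2) = 4.0f,  real1 = 1.0f * (1 - 0) = 1.0f
-//   round((4.0f - 1.0f) / 1.0f) + 0 = 3, the first expected output value.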
-
-LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<uint8_t> input0({ 10, 12, 14, 16 });
- std::vector<uint8_t> input1({ 2 });
- std::vector<uint8_t> output({ 5, 6, 7, 8 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 2,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 3);
-}
-
-LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 2, 1 };
-
- std::vector<uint8_t> input0({ 10, 12, 14, 16 });
- std::vector<uint8_t> input1({ 2, 1 });
- std::vector<uint8_t> output({ 8, 11, 12, 15 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<float, 4> SubtractionTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 2, 2 };
-
- std::vector<float> input0({ 1, 2, 3, 4 });
- std::vector<float> input1({ 1, -1, 0, 2 });
- std::vector<float> output({ 0, 3, 3, 2 });
-
- return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<float> input0({ 1, 2, 3, 4 });
- std::vector<float> input1({ 10 });
- std::vector<float> output({ -9, -8, -7, -6 });
-
- return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<float, 4> SubtractionBroadcastTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 1, 2 };
-
- std::vector<float> input0({ 1, 2, 3, 4 });
- std::vector<float> input1({ 10, -5 });
- std::vector<float> output({ -9, 7, -7, 9 });
-
- return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<int16_t, 4> SubtractionInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 2, 2 };
-
- std::vector<int16_t> input0({ 10, 12, 14, 16 });
- std::vector<int16_t> input1({ 1, 2, 1, 2 });
- std::vector<int16_t> output({ 3, 3, 5, 5 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 1, 1 };
-
- std::vector<int16_t> input0({ 10, 12, 14, 16 });
- std::vector<int16_t> input1({ 2 });
- std::vector<int16_t> output({ 3, 4, 5, 6 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0, input0, 0.5f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
-LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- const unsigned int shape0[] = { 1, 1, 2, 2 };
- const unsigned int shape1[] = { 1, 1, 2, 1 };
-
- std::vector<int16_t> input0({ 10, 12, 14, 16 });
- std::vector<int16_t> input1({ 2, 1 });
- std::vector<int16_t> output({ 8, 11, 12, 15 });
-
- return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
- memoryManager,
- shape0, input0, 1.0f, 0,
- shape1, input1, 1.0f, 0,
- shape0, output, 1.0f, 0);
-}
-
LayerTestResult<float, 4> BatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -10021,111 +7442,6 @@ LayerTestResult<float, 2> FullyConnectedLargeTest(
return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
-LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
- // Create Initial Tensor
- // 1, 2, 3
- // 4, 5, 6
- // 7, 8, 9
-
- armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
- armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
-
- boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
- {1, 2, 3,
- 4, 5, 6,
- 7, 8, 9
- });
-
- std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
- workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
- workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
-
- // Apply MaxPool poolSize = 1x1, stride=2x2
- // Result =
- // 1, 3
- // 7, 9
- armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolHeight = 1;
- descriptor.m_PoolWidth = 1;
- descriptor.m_StrideX = 2;
- descriptor.m_StrideY = 2;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
-
- armnn::Pooling2dQueueDescriptor queueDescriptor;
- queueDescriptor.m_Parameters = descriptor;
- armnn::WorkloadInfo workloadInfo;
- AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
- AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
-
- // Create the MaxPool workload.
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
-
- auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
- boost::multi_array<float, 4> resultMaxPool;
- resultMaxPool.resize(shape);
-
- // Create an addition with a second tensor of the same size. This tensor is
- // what a Conv2d with a 2x2 kernel of ones and stride 1x1 would produce from
- // the initial tensor:
- // 12, 16
- // 24, 28
-
- armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
- armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
-
- boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
- {12, 16,
- 24, 28,
- });
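-
- // Sanity check of those values: a 2x2 kernel of ones, stride 1x1, sums each
- // 2x2 window of the initial tensor:
- //   1+2+4+5 = 12,  2+3+5+6 = 16,
- //   4+5+7+8 = 24,  5+6+8+9 = 28.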
-
- // Expected output tensor after MaxPool and Addition.
- LayerTestResult<float,4> addRet(addOutputTensorInfo);
- addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
- {
- 13, 19,
- 31, 37
- }));
-
- std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
-
- armnn::AdditionQueueDescriptor data;
- armnn::WorkloadInfo info;
-
- // Add the output of the MaxPool and the new tensor
- AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
- AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
- AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
-
- poolingInputHandle->Allocate();
- poolingOutputHandle->Allocate();
- addInputHandle->Allocate();
- addOutputHandle->Allocate();
-
- CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
- CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
-
- CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
- CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
- addWorkload->PostAllocationConfigure();
- addWorkload->Execute();
-
- CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
-
- return addRet;
-}
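-
-// Note on the pooling above: a 1x1 pool window with stride 2x2 simply subsamples
-// the input at even coordinates, so the 3x3 input yields elements (0,0), (0,2),
-// (2,0) and (2,2), i.e. { 1, 3, 7, 9 }; adding { 12, 16, 24, 28 } gives the
-// expected { 13, 19, 31, 37 }.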
-
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)