From 3ec3077b4eaedcc0c20ab5774bdbe365da541445 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 8 Mar 2023 13:47:17 +0000
Subject: IVGCVSW-3808 Add ElementwiseBinaryLayer

!android-nn-driver:9329

* Added ElementwiseBinaryLayer that can represent all ElementwiseBinary
  operations including Add, Div, Sub, Maximum, Mul and Minimum.
* Updated Delegate to use ElementwiseBinaryLayer instead of the Add, Div, Sub,
  Maximum, Mul and Minimum layers.
* Updated Deserializer to use ElementwiseBinaryLayer instead of the Add, Div,
  Sub, Maximum, Mul and Minimum layers.
* Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add layer.
* Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add, Div,
  Sub, Maximum, Mul and Minimum layers.
* Updated CL and Neon tests to use ElementwiseBinaryLayer.
* Updated CL and Neon Backend Specific Optimizations to accept
  ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum
  layers.

Signed-off-by: Teresa Charlin
Signed-off-by: Mike Kelly
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
---
 src/backends/cl/test/ClCreateWorkloadTests.cpp | 46 ++++++++------------------
 src/backends/cl/test/ClFallbackTests.cpp       | 18 +++++-----
 src/backends/cl/test/Fp16SupportTest.cpp       |  4 +--
 3 files changed, 24 insertions(+), 44 deletions(-)

diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index adea733582..c49ca23266 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -66,19 +66,17 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloat16Workload")
 }

 template
-static void ClCreateElementwiseWorkloadTest()
+static void ClCreateElementwiseWorkloadTest(BinaryOperation binaryOperator)
 {
     Graph graph;
     ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());

-    auto workload = CreateElementwiseWorkloadTest(factory, graph);
+    auto workload = CreateElementwiseBinaryWorkloadTest(factory, graph, binaryOperator);

     // Checks that inputs/outputs are as we expect them (see definition of CreateElementwiseWorkloadTest).
-    DescriptorType queueDescriptor = workload->GetData();
+    auto queueDescriptor = workload->GetData();
     auto inputHandle1 = PolymorphicDowncast(queueDescriptor.m_Inputs[0]);
     auto inputHandle2 = PolymorphicDowncast(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);
@@ -93,73 +91,55 @@ static void ClCreateElementwiseWorkloadTest()
 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float32>(BinaryOperation::Add);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float16>(BinaryOperation::Add);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float32>(BinaryOperation::Sub);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float16>(BinaryOperation::Sub);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float32>(BinaryOperation::Mul);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float16>(BinaryOperation::Mul);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationUint8WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::QAsymmU8>(BinaryOperation::Mul);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float32>(BinaryOperation::Div);
 }

 TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest();
+                                    armnn::DataType::Float16>(BinaryOperation::Div);
 }

 template

diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");

     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -172,8 +172,8 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");

     input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -301,8 +301,8 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");

@@ -460,8 +460,8 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
     IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
     IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
     IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
-    IConnectableLayer* add = net->AddAdditionLayer("add");
-    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");
+    IConnectableLayer* sub = net->AddElementwiseBinaryLayer(BinaryOperation::Sub, "sub");
     IConnectableLayer* pooling = net->AddPooling2dLayer(desc, "pooling");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");

diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index da6ea10926..28ae4795ab 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -28,7 +28,7 @@ TEST_CASE("Fp16DataTypeSupport")
     Layer* const inputLayer1 = graph.AddLayer(1, "input1");
     Layer* const inputLayer2 = graph.AddLayer(2, "input2");
-    Layer* const additionLayer = graph.AddLayer("addition");
+    Layer* const additionLayer = graph.AddLayer(BinaryOperation::Add, "addition");
     Layer* const outputLayer = graph.AddLayer(0, "output");

     TensorInfo fp16TensorInfo({1, 2, 3, 5}, armnn::DataType::Float16);
@@ -57,7 +57,7 @@ TEST_CASE("Fp16AdditionTest")
     IConnectableLayer* inputLayer1 = net->AddInputLayer(0);
     IConnectableLayer* inputLayer2 = net->AddInputLayer(1);
-    IConnectableLayer* additionLayer = net->AddAdditionLayer();
+    IConnectableLayer* additionLayer = net->AddElementwiseBinaryLayer(BinaryOperation::Add);
     IConnectableLayer* outputLayer = net->AddOutputLayer(0);

     inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
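
Usage note (not part of the patch above): the sketch below shows the migration these tests exercise, replacing the per-operation AddAdditionLayer/AddSubtractionLayer calls with the single AddElementwiseBinaryLayer API parameterised by a BinaryOperation. It is a minimal, construction-only example; the header name and main() scaffolding are assumptions, while the layer-creation and slot-connection calls mirror the ones used in the diff.

#include <armnn/INetwork.hpp>

using namespace armnn;

int main()
{
    // Build a small network with the unified elementwise-binary API.
    INetworkPtr net = INetwork::Create();

    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");

    // Previously: net->AddAdditionLayer("add");
    // Now: one layer type, selected by the BinaryOperation argument
    // (Add, Sub, Mul, Div, Maximum or Minimum).
    IConnectableLayer* add = net->AddElementwiseBinaryLayer(BinaryOperation::Add, "add");

    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    // Wire inputs into the binary layer and its result into the output.
    input0->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}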