From 3ec3077b4eaedcc0c20ab5774bdbe365da541445 Mon Sep 17 00:00:00 2001 From: Mike Kelly Date: Wed, 8 Mar 2023 13:47:17 +0000 Subject: IVGCVSW-3808 Add ElementwiseBinaryLayer !android-nn-driver:9329 * Added ElementwiseBinaryLayer that can represent all ElementwiseBinary operations including Add, Div, Sub, Maximum, Mul and Minimum. * Updated Delegate to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated Deserializer to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add layer. * Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add, Div, Sub, Maximum, Mul and Minimum layers. * Updated CL and Neon tests to use ElementwiseBinaryLayer. * Updated CL and Neon Backend Specific Optimizations to accept ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum layers. Signed-off-by: Teresa Charlin Signed-off-by: Mike Kelly Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75 --- .../workloads/RefElementwiseBinaryWorkload.cpp | 120 +++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp (limited to 'src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp') diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp new file mode 100644 index 0000000000..5dc77f8496 --- /dev/null +++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp @@ -0,0 +1,120 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefElementwiseBinaryWorkload.hpp" + +#include "Decoders.hpp" +#include "ElementwiseFunction.hpp" +#include "Encoders.hpp" +#include "RefWorkloadUtils.hpp" +#include "Maximum.hpp" +#include "Minimum.hpp" + +#include + +#include + +#include + +namespace armnn +{ + +template +void ExecuteFunction(std::vector inputs, + std::vector outputs, + BinaryOperation operation) +{ + const TensorInfo& inputInfo0 = GetTensorInfo(inputs[0]); + const TensorInfo& inputInfo1 = GetTensorInfo(inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(outputs[0]); + + const TensorShape& inShape0 = inputInfo0.GetShape(); + const TensorShape& inShape1 = inputInfo1.GetShape(); + const TensorShape& outShape = outputInfo.GetShape(); + + std::unique_ptr> input0 = MakeDecoder(inputInfo0, inputs[0]->Map()); + std::unique_ptr> input1 = MakeDecoder(inputInfo1, inputs[1]->Map()); + std::unique_ptr> output = MakeEncoder(outputInfo, outputs[0]->Map()); + + using AddFunction = ElementwiseBinaryFunction>; + using DivFunction = ElementwiseBinaryFunction>; + using MaximumFunction = ElementwiseBinaryFunction>; + using MinimumFunction = ElementwiseBinaryFunction>; + using MulFunction = ElementwiseBinaryFunction>; + using SubFunction = ElementwiseBinaryFunction>; + + switch (operation) + { + case BinaryOperation::Add: + { + AddFunction(inShape0, inShape1, outShape, *input0, *input1, *output); + break; + } + case BinaryOperation::Div: + { + DivFunction(inShape0, inShape1, outShape, *input0, *input1, *output); + break; + } + case BinaryOperation::Maximum: + { + MaximumFunction(inShape0, inShape1, outShape, *input0, *input1, *output); + break; + } + case BinaryOperation::Minimum: + { + MinimumFunction(inShape0, inShape1, outShape, *input0, *input1, *output); + break; + } + case BinaryOperation::Mul: + { + MulFunction(inShape0, inShape1, outShape, *input0, *input1, *output); + break; + } + case BinaryOperation::Sub: + { + SubFunction(inShape0, inShape1, 
outShape, *input0, *input1, *output); + break; + } + default: + { + throw InvalidArgumentException(std::string("Unsupported binary operation ") + + GetBinaryOperationAsCString(operation), CHECK_LOCATION()); + } + } +} + +RefElementwiseBinaryWorkload::RefElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& desc, + const WorkloadInfo& info) + : RefBaseWorkload(desc, info) +{} + +void RefElementwiseBinaryWorkload::Execute() const +{ + Execute(m_Data.m_Inputs, m_Data.m_Outputs); +} + +void RefElementwiseBinaryWorkload::ExecuteAsync(ExecutionData& executionData) +{ + + WorkingMemDescriptor* workingMemDescriptor = static_cast(executionData.m_Data); + Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); +} + +void RefElementwiseBinaryWorkload::Execute(std::vector inputs, + std::vector outputs) const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseBinaryWorkload_Execute"); + + if (GetTensorInfo(inputs[0]).GetDataType() == DataType::Signed32) + { + ExecuteFunction(inputs, outputs, m_Data.m_Parameters.m_Operation); + } + else + { + ExecuteFunction(inputs, outputs, m_Data.m_Parameters.m_Operation); + } +} + +} // namespace armnn -- cgit v1.2.1