diff options
author | Mike Kelly <mike.kelly@arm.com> | 2023-03-08 13:47:17 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2023-03-14 16:40:09 +0000 |
commit | 3ec3077b4eaedcc0c20ab5774bdbe365da541445 (patch) | |
tree | d601d2000897dec8691bf64cbddc9036f26b8034 /src/backends/backendsCommon/WorkloadData.cpp | |
parent | a088cd00b3cce672d26cdcb4965fc2a86b48f339 (diff) | |
download | armnn-3ec3077b4eaedcc0c20ab5774bdbe365da541445.tar.gz |
IVGCVSW-3808 Add ElementwiseBinaryLayer
!android-nn-driver:9329
* Added ElementwiseBinaryLayer that can represent all ElementwiseBinary
operations including Add, Div, Sub, Maximum, Mul and Minimum.
* Updated Delegate to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated Deserializer to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated OnnxParser to use ElementwiseBinaryLayer instead of the Add
layer.
* Updated TfLiteParser to use ElementwiseBinaryLayer instead of the Add,
Div, Sub, Maximum, Mul and Minimum layers.
* Updated CL and Neon tests to use ElementwiseBinaryLayer.
* Updated CL and Neon Backend Specific Optimizations to accept
ElementwiseBinaryLayers as well as Add, Div, Mul, Sub, Maximum and Minimum
layers.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I7cbb96b60eb01f0e2b57b0541016d48a08b86c75
Diffstat (limited to 'src/backends/backendsCommon/WorkloadData.cpp')
-rw-r--r-- | src/backends/backendsCommon/WorkloadData.cpp | 34 |
1 files changed, 30 insertions, 4 deletions
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 62dfc6a38b..6a5963ddcb 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -35,11 +35,8 @@ DataType GetBiasDataType(DataType inputDataType) case DataType::Float32: return DataType::Float32; case DataType::QAsymmS8: - return DataType::Signed32; case DataType::QAsymmU8: - return DataType::Signed32; case DataType::QSymmS8: - return DataType::Signed32; case DataType::QSymmS16: return DataType::Signed32; default: @@ -3668,6 +3665,35 @@ void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } +void ElementwiseBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string descriptorName{"ElementwiseBinaryQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 2); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + std::vector<DataType> supportedTypes = + { + DataType::BFloat16, + DataType::Float16, + DataType::Float32, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS16, + DataType::Signed32 + }; + + ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); + ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName); + + ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input", "output"); + ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input", "output"); +} + void 
ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"}; |