author    Teresa Charlin <teresa.charlinreyes@arm.com>   2021-12-03 16:07:42 +0000
committer Mike Kelly <mike.kelly@arm.com>                2023-03-14 18:30:34 +0000
commit    ee5872d95455351458ad4373176360200594daa1 (patch)
tree      63207fa1e397e1c3cf87072d93c490a234935a8c
parent    de547168f108ec1494f18b3ab1ea50bd09f370c1 (diff)
IVGCVSW-3809 Refactor Elementwise Binary ops to use ElementwiseBinaryLayer
!armnn:9319

 * Refactored all functions to convert Add, Div, Maximum, Minimum, Mul
   and Sub to use ElementwiseBinary layers instead.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ic05885cd8692e7f1b7032862fb4a395af70e0bcd
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: If9717d6ab236d97c76f6cd39b96bde86c81e4382
-rw-r--r--  1.0/HalPolicy.cpp        27
-rw-r--r--  1.0/HalPolicy.hpp        11
-rw-r--r--  1.1/HalPolicy.cpp        21
-rw-r--r--  1.1/HalPolicy.hpp         9
-rw-r--r--  1.2/HalPolicy.cpp        54
-rw-r--r--  1.2/HalPolicy.hpp        19
-rw-r--r--  1.3/HalPolicy.cpp        53
-rw-r--r--  1.3/HalPolicy.hpp        17
-rw-r--r--  ConversionUtils.hpp     265
-rw-r--r--  ConversionUtils_1_2.hpp 138

10 files changed, 98 insertions(+), 516 deletions(-)
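
Before the per-file diffs: all six binary ops now funnel through a single converter keyed by armnn::BinaryOperation. A standalone sketch (toy main, not part of the patch) listing the enum values involved and the GetBinaryOperationAsCString helper that the new ConvertElementwiseBinary() traces with:

// Standalone sketch, not part of the patch: the armnn::BinaryOperation values
// that replace the six dedicated converters removed below.
#include <armnn/Types.hpp>
#include <armnn/TypesUtils.hpp>
#include <iostream>

int main()
{
    const armnn::BinaryOperation ops[] = {
        armnn::BinaryOperation::Add,     armnn::BinaryOperation::Div,
        armnn::BinaryOperation::Maximum, armnn::BinaryOperation::Minimum,
        armnn::BinaryOperation::Mul,     armnn::BinaryOperation::Sub,
    };
    for (armnn::BinaryOperation op : ops)
    {
        // Same helper the new converter uses for its ALOGV trace.
        std::cout << armnn::GetBinaryOperationAsCString(op) << '\n';
    }
    return 0;
}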
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 624a5f2a..08de1b52 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
switch (operation.type)
{
case V1_0::OperationType::ADD:
- return ConvertAdd(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Add);
case V1_0::OperationType::AVERAGE_POOL_2D:
return ConvertAveragePool2d(operation, model, data);
case V1_0::OperationType::CONCATENATION:
@@ -50,7 +50,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_0::OperationType::MAX_POOL_2D:
return ConvertMaxPool2d(operation, model, data);
case V1_0::OperationType::MUL:
- return ConvertMul(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Mul);
case V1_0::OperationType::RELU:
return ConvertReLu(operation, model, data);
case V1_0::OperationType::RELU1:
@@ -73,12 +73,6 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
-bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_0::HalPolicy::ConvertAdd()");
- return ::ConvertAdd<hal_1_0::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
@@ -115,6 +109,15 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
return ::ConvertDequantize<hal_1_0::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation)
+{
+ ALOGV("hal_1_0::HalPolicy::ConvertElementwiseBinary()");
+ return ::ConvertElementwiseBinary<hal_1_0::HalPolicy>(operation, model, data, binaryOperation);
+}
+
bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertFloor()");
@@ -516,12 +519,6 @@ bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model,
return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
-bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_0::HalPolicy::ConvertMul()");
- return ::ConvertMul<hal_1_0::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
diff --git a/1.0/HalPolicy.hpp b/1.0/HalPolicy.hpp
index 25bc47ce..5d92f0d6 100644
--- a/1.0/HalPolicy.hpp
+++ b/1.0/HalPolicy.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,8 +31,6 @@ public:
static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
private:
- static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data);
@@ -45,6 +43,11 @@ private:
static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation);
+
static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
@@ -63,8 +66,6 @@ private:
static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 53a884ca..cd59cd6b 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2019,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -80,9 +80,9 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
switch (operation.type)
{
case V1_1::OperationType::DIV:
- return ConvertDiv(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Div);
case V1_1::OperationType::SUB:
- return ConvertSub(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Sub);
case V1_1::OperationType::MEAN:
return ConvertMean(operation, model, data);
case V1_1::OperationType::PAD:
@@ -104,16 +104,13 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
-bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation)
{
- ALOGV("hal_1_1::HalPolicy::ConvertDiv()");
- return ::ConvertDiv<hal_1_1::HalPolicy>(operation, model, data);
-}
-
-bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_1::HalPolicy::ConvertSub()");
- return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
+ ALOGV("hal_1_1::HalPolicy::ConvertElementwiseBinary()");
+ return ::ConvertElementwiseBinary<hal_1_1::HalPolicy>(operation, model, data, binaryOperation);
}
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index 18bb705c..e1feb830 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,8 +31,11 @@ public:
static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
private:
- static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation);
+
static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 7dae6a1c..bfc467cf 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -1,12 +1,11 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "HalPolicy.hpp"
#include "DriverOptions.hpp"
-
namespace armnn_driver
{
namespace hal_1_2
@@ -53,7 +52,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_2::OperationType::ABS:
return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
case V1_2::OperationType::ADD:
- return ConvertAdd(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Add);
case V1_2::OperationType::ARGMAX:
return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
case V1_2::OperationType::ARGMIN:
@@ -77,7 +76,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_2::OperationType::DEQUANTIZE:
return ConvertDequantize(operation, model, data);
case V1_2::OperationType::DIV:
- return ConvertDiv(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Div);
case V1_2::OperationType::EQUAL:
return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
case V1_2::OperationType::EXP:
@@ -119,13 +118,13 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_2::OperationType::MAX_POOL_2D:
return ConvertMaxPool2d(operation, model, data);
case V1_2::OperationType::MAXIMUM:
- return ConvertMaximum(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Maximum);
case V1_2::OperationType::MEAN:
return ConvertMean(operation, model, data);
case V1_2::OperationType::MINIMUM:
- return ConvertMinimum(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Minimum);
case V1_2::OperationType::MUL:
- return ConvertMul(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Mul);
case V1_2::OperationType::NEG:
return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
case V1_2::OperationType::NOT_EQUAL:
@@ -177,7 +176,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_2::OperationType::STRIDED_SLICE:
return ConvertStridedSlice(operation, model, data);
case V1_2::OperationType::SUB:
- return ConvertSub(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Sub);
case V1_2::OperationType::TRANSPOSE:
return ConvertTranspose(operation, model, data);
case V1_2::OperationType::TRANSPOSE_CONV_2D:
@@ -192,12 +191,6 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
-bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
- return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertArgMinMax(const V1_2::Operation& operation,
const V1_2::Model& model,
ConversionData& data,
@@ -270,10 +263,13 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
return ::ConvertDequantize_1_2<hal_1_2::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ BinaryOperation binaryOperation)
{
- ALOGV("hal_1_2::HalPolicy::ConvertDiv()");
- return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
+ ALOGV("hal_1_2::HalPolicy::ConvertElementwiseBinary()");
+ return ::ConvertElementwiseBinary<hal_1_2::HalPolicy>(operation, model, data, binaryOperation);
}
bool HalPolicy::ConvertElementwiseUnary(const Operation& operation,
@@ -359,30 +355,12 @@ bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model,
return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, PoolingAlgorithm::Max, model, data);
}
-bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
- return ::ConvertMaximum<hal_1_2::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertMean()");
return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
- return ::ConvertMinimum<hal_1_2::HalPolicy>(operation, model, data);
-}
-
-bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertMul()");
- return ::ConvertMul<hal_1_2::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertPad()");
@@ -473,12 +451,6 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return ::ConvertSoftmax<hal_1_2::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertSub()");
- return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index bf4540a6..4121ec9c 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,8 +39,6 @@ public:
static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
private:
- static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertArgMinMax(const Operation& operation,
const Model& model,
ConversionData& data,
@@ -69,10 +67,13 @@ private:
static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation);
+
static bool ConvertElementwiseUnary(const Operation& operation,
const Model& model,
ConversionData& data,
@@ -104,14 +105,8 @@ private:
static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
-
- static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
@@ -152,8 +147,6 @@ private:
static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 5563e806..16b325ba 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,7 +24,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_3::OperationType::ABS:
return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
case V1_3::OperationType::ADD:
- return ConvertAdd(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Add);
case V1_3::OperationType::ARGMAX:
return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
case V1_3::OperationType::ARGMIN:
@@ -48,7 +48,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_3::OperationType::DEQUANTIZE:
return ConvertDequantize(operation, model, data);
case V1_3::OperationType::DIV:
- return ConvertDiv(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Div);
case V1_3::OperationType::ELU:
return ConvertElu(operation, model, data);
case V1_3::OperationType::EQUAL:
@@ -102,13 +102,13 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_3::OperationType::MAX_POOL_2D:
return ConvertMaxPool2d(operation, model, data);
case V1_3::OperationType::MAXIMUM:
- return ConvertMaximum(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Maximum);
case V1_3::OperationType::MEAN:
return ConvertMean(operation, model, data);
case V1_3::OperationType::MINIMUM:
- return ConvertMinimum(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Minimum);
case V1_3::OperationType::MUL:
- return ConvertMul(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Mul);
case V1_3::OperationType::NEG:
return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
case V1_3::OperationType::NOT_EQUAL:
@@ -164,7 +164,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_3::OperationType::STRIDED_SLICE:
return ConvertStridedSlice(operation, model, data);
case V1_3::OperationType::SUB:
- return ConvertSub(operation, model, data);
+ return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Sub);
case V1_3::OperationType::TRANSPOSE:
return ConvertTranspose(operation, model, data);
case V1_3::OperationType::TRANSPOSE_CONV_2D:
@@ -179,12 +179,6 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
-bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_3::HalPolicy::ConvertAdd()");
- return ::ConvertAdd<hal_1_3::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertArgMinMax(const V1_3::Operation& operation,
const V1_3::Model& model,
ConversionData& data,
@@ -257,10 +251,13 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
return ::ConvertDequantize_1_2<hal_1_3::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ BinaryOperation binaryOperation)
{
- ALOGV("hal_1_3::HalPolicy::ConvertDiv()");
- return ::ConvertDiv<hal_1_3::HalPolicy>(operation, model, data);
+ ALOGV("hal_1_3::HalPolicy::ConvertElementwiseBinary()");
+ return ::ConvertElementwiseBinary<hal_1_3::HalPolicy>(operation, model, data, binaryOperation);
}
bool HalPolicy::ConvertElementwiseUnary(const Operation& operation,
@@ -382,30 +379,12 @@ bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model,
return ConvertPooling2d<hal_1_3::HalPolicy>(operation, __func__, PoolingAlgorithm::Max, model, data);
}
-bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_3::HalPolicy::ConvertMaximum()");
- return ::ConvertMaximum<hal_1_3::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertMean()");
return ::ConvertMean<hal_1_3::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_3::HalPolicy::ConvertMinimum()");
- return ::ConvertMinimum<hal_1_3::HalPolicy>(operation, model, data);
-}
-
-bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_3::HalPolicy::ConvertMul()");
- return ::ConvertMul<hal_1_3::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertPad()");
@@ -508,12 +487,6 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
return ::ConvertSoftmax<hal_1_3::HalPolicy>(operation, model, data);
}
-bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_3::HalPolicy::ConvertSub()");
- return ::ConvertSub<hal_1_3::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertTanH()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index 7411b24b..63e5ee7c 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -36,8 +36,6 @@ public:
static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
private:
- static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertArgMinMax(const Operation& operation,
const Model& model,
ConversionData& data,
@@ -66,7 +64,10 @@ private:
static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertElementwiseBinary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation);
static bool ConvertElementwiseUnary(const Operation& operation,
const Model& model,
@@ -112,14 +113,8 @@ private:
static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
-
- static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
@@ -164,8 +159,6 @@ private:
static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
- static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index efd7010c..78e0e205 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1884,82 +1884,6 @@ bool ConvertPooling2d(const HalOperation& operation,
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
-bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- // The FuseActivation parameter is always the input index 2
- // and it should be optional
- ActivationFn activationFunction;
- if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
- if (!outputOperand)
- {
- return false;
- }
-
- const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
- const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsAdditionSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- inputInfo0,
- inputInfo1,
- outputInfo);
- };
-
- if(!IsDynamicTensor(outputInfo))
- {
- validateFunc(outputInfo, isSupported);
- }
- else
- {
- isSupported = AreDynamicTensorsSupported();
- }
-
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
- startLayer->SetBackendId(setBackend);
-
- bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
- if (!isReshapeSupported)
- {
- return false;
- }
-
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
- data, nullptr, validateFunc, activationFunction);
-
-}
-
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
const HalModel& model,
ConversionData& data,
@@ -2859,10 +2783,16 @@ bool ConvertDequantize(const HalOperation& operation, const HalModel& model, Con
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
-bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
+bool ConvertElementwiseBinary(const HalOperation& operation,
+ const HalModel& model,
+ ConversionData& data,
+ armnn::BinaryOperation binaryOperation)
{
using HalOperand = typename HalPolicy::Operand;
+ ALOGV("HalPolicy::ConvertElementwiseBinary()");
+ ALOGV("binaryOperation = %s", GetBinaryOperationAsCString(binaryOperation));
+
LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
@@ -2871,37 +2801,38 @@ bool ConvertDiv(const HalOperation& operation, const HalModel& model, Conversion
return Fail("%s: Operation has invalid inputs", __func__);
}
- // The FuseActivation parameter is always the input index 2
- // and it should be optional
+ // The FuseActivation parameter is always the input index 2, and it should be optional
ActivationFn activationFunction;
if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
{
- return Fail("%s: Operation has invalid inputs", __func__);
+ return Fail("%s: Operation has invalid optional input: activation function", __func__);
}
const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
if (!output)
{
- return Fail("%s: Could not read output 0", __func__);
+ return Fail("%s: Could not read output", __func__);
}
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
+
bool isSupported = false;
- armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsDivisionSupported,
+ IsElementwiseBinarySupported,
data.m_Backends,
isSupported,
- setBackend,
+ armnn::BackendId(),
input0.GetTensorInfo(),
input1.GetTensorInfo(),
- outputInfo);
+ outputInfo,
+ binaryOperation);
};
- if(!IsDynamicTensor(outputInfo))
+ if (!IsDynamicTensor(outputInfo))
{
validateFunc(outputInfo, isSupported);
}
@@ -2915,20 +2846,22 @@ bool ConvertDiv(const HalOperation& operation, const HalModel& model, Conversion
return false;
}
- armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
- startLayer->SetBackendId(setBackend);
-
- bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+ armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
+ if (!layer)
+ {
+ return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
+ }
+ bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
{
return false;
}
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
- data, nullptr, validateFunc, activationFunction);
-
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc,
+ activationFunction);
}
+
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
@@ -3571,79 +3504,6 @@ bool ConvertMean(const HalOperation& operation, const HalModel& model, Conversio
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
-bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- // The FuseActivation parameter is always the input index 2
- // and it should be optional
- ActivationFn activationFunction;
- if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
-
- if (outputOperand == nullptr)
- {
- return false;
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsMultiplicationSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outputInfo);
- };
-
- if(!IsDynamicTensor(outputInfo))
- {
- validateFunc(outputInfo, isSupported);
- }
- else
- {
- isSupported = AreDynamicTensorsSupported();
- }
-
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
- startLayer->SetBackendId(setBackend);
-
- bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
- if (!isReshapeSupported)
- {
- return false;
- }
-
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
- data, nullptr, validateFunc, activationFunction);
-}
-
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
@@ -3814,77 +3674,6 @@ bool ConvertReshape(const HalOperation& operation, const HalModel& model, Conver
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
-bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- // The FuseActivation parameter is always the input index 2
- // and it should be optional
- ActivationFn activationFunction;
- if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsSubtractionSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outputInfo);
- };
-
- if(IsDynamicTensor(outputInfo))
- {
- isSupported = AreDynamicTensorsSupported();
- }
- else
- {
- validateFunc(outputInfo, isSupported);
- }
-
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
- startLayer->SetBackendId(setBackend);
-
- bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
- if (!isReshapeSupported)
- {
- return false;
- }
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
- data, nullptr, validateFunc, activationFunction);
-}
-
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
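
The converters in this file share a validate-or-defer pattern around dynamic output shapes: validate against the backends immediately when the output shape is known, otherwise defer until the shape is inferred. A self-contained sketch of that control flow (toy TensorInfo and capability stubs, not driver code):

// Toy sketch of the validate-or-defer pattern; the types are stand-ins, not armnn's.
#include <iostream>

struct TensorInfo { bool hasDynamicDims; };
bool IsDynamicTensor(const TensorInfo& info) { return info.hasDynamicDims; }
bool AreDynamicTensorsSupported() { return true; } // backend capability stub

int main()
{
    TensorInfo outputInfo{ /*hasDynamicDims=*/false };

    bool isSupported = false;
    // In the driver, validateFunc is also handed to SetupAndTrackLayerOutputSlot
    // so it can be re-run once a dynamic output shape becomes known.
    auto validateFunc = [&](const TensorInfo& info, bool& supported)
    {
        supported = !info.hasDynamicDims; // stand-in for the backend support query
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    std::cout << std::boolalpha << isSupported << '\n';
    return 0;
}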
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index ce6be440..2ad14c2f 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1555,142 +1555,6 @@ bool ConvertLogSoftmax(const HalOperation& operation, const HalModel& model, Con
template<typename HalPolicy,
typename HalOperation = typename HalPolicy::Operation,
typename HalModel = typename HalPolicy::Model>
-bool ConvertMaximum(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- ALOGV("HalPolicy::ConvertMaximum()");
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
- if (!outputOperand)
- {
- return Fail("%s: Could not read output", __func__);
- }
-
- const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsMaximumSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outInfo);
- };
-
- if(IsDynamicTensor(outInfo))
- {
- isSupported = AreDynamicTensorsSupported();
- }
- else
- {
- validateFunc(outInfo, isSupported);
- }
-
- if (!isSupported)
- {
- return false;
- }
-
- IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
- layer->SetBackendId(setBackend);
- if (!layer)
- {
- return Fail("%s: Could not add the MaximumLayer", __func__);
- }
- bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
- if (!isReshapeSupported)
- {
- return false;
- }
-
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
-}
-
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
-bool ConvertMinimum(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- using HalOperand = typename HalPolicy::Operand;
-
- ALOGV("HalPolicy::ConvertMinimum()");
-
- LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
- LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
-
- if (!input0.IsValid() || !input1.IsValid())
- {
- return Fail("%s: Operation has invalid inputs", __func__);
- }
-
- const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
- bool isSupported = false;
- armnn::BackendId setBackend;
- auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
- {
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsMinimumSupported,
- data.m_Backends,
- isSupported,
- setBackend,
- input0.GetTensorInfo(),
- input1.GetTensorInfo(),
- outputInfo);
- };
-
- if(IsDynamicTensor(outputInfo))
- {
- isSupported = AreDynamicTensorsSupported();
- }
- else
- {
- validateFunc(outputInfo, isSupported);
- }
-
- if (!isSupported)
- {
- return false;
- }
-
- IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
- layer->SetBackendId(setBackend);
- if (!layer)
- {
- return Fail("%s: Could not add the MinimumLayer", __func__);
- }
- bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
- if (!isReshapeSupported)
- {
- return false;
- }
-
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
-}
-
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
bool ConvertPadV2(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
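
For context on the layer the driver now targets: a minimal sketch of building an Arm NN network around the descriptor-driven layer. It assumes an Arm NN release that ships ElementwiseBinaryLayer (the !armnn:9319 change referenced above); the binding IDs, shapes and layer names are illustrative:

// Minimal sketch, not driver code: one AddElementwiseBinaryLayer call covers the
// ops that previously needed AddAdditionLayer, AddDivisionLayer, AddMaximumLayer,
// AddMinimumLayer, AddMultiplicationLayer and AddSubtractionLayer.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

armnn::INetworkPtr BuildAddNetwork()
{
    using namespace armnn;

    INetworkPtr net = INetwork::Create();
    TensorInfo info({ 1, 4 }, DataType::Float32);

    IConnectableLayer* in0 = net->AddInputLayer(0, "input0");
    IConnectableLayer* in1 = net->AddInputLayer(1, "input1");

    // The descriptor selects the operation; swap Add for Div, Maximum, etc.
    ElementwiseBinaryDescriptor descriptor(BinaryOperation::Add);
    IConnectableLayer* binary = net->AddElementwiseBinaryLayer(descriptor, "add");

    IConnectableLayer* out = net->AddOutputLayer(0, "output");

    in0->GetOutputSlot(0).Connect(binary->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(binary->GetInputSlot(1));
    binary->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    in0->GetOutputSlot(0).SetTensorInfo(info);
    in1->GetOutputSlot(0).SetTensorInfo(info);
    binary->GetOutputSlot(0).SetTensorInfo(info);

    return net;
}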