author     josh minor <josh.minor@arm.com>  2020-01-08 11:55:35 -0600
committer  josh minor <josh.minor@arm.com>  2020-01-13 18:40:36 -0600
commit     00a963bb81bd18e7245dc324b44b7c57baa07c05 (patch)
tree       15954f0699be76dace93afbac20718778de8e00d
parent     1a38cdaefc5e53ba6bbaba54651dda459b81eafe (diff)
download   android-nn-driver-00a963bb81bd18e7245dc324b44b7c57baa07c05.tar.gz
IVGCVSW-3813 Add Unary Elementwise Operation support to the android-nn-driver
* Account for deprecated Abs and Rsqrt layers in ArmNN
* Update HAL 1.2
* Neg HAL operation support added

!armnn:2542

Signed-off-by: josh minor <josh.minor@arm.com>
Change-Id: I043bf64ac7ed3aea65560d72acf664e079917baf
Signed-off-by: josh minor <josh.minor@arm.com>
-rw-r--r--  1.2/HalPolicy.cpp   | 105
-rw-r--r--  1.2/HalPolicy.hpp   |   9
-rw-r--r--  ConversionUtils.hpp |  46
3 files changed, 60 insertions(+), 100 deletions(-)
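
In short, the patch deletes the per-op ConvertAbs and ConvertRsqrt converters and routes ABS, RSQRT and the newly supported NEG through one ConvertElementwiseUnary function built on ArmNN's unified elementwise-unary layer. As orientation, here is a minimal sketch of that underlying ArmNN API; the network wiring, tensor shape, binding ids and layer names are illustrative assumptions, not code from the driver:

    #include <armnn/ArmNN.hpp>

    // One descriptor type now covers Abs, Rsqrt, Neg, etc., so a single
    // AddElementwiseUnaryLayer call replaces the old dedicated
    // AddAbsLayer/AddRsqrtLayer calls.
    armnn::INetworkPtr BuildRsqrtNetwork()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();
        const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);

        armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
        input->GetOutputSlot(0).SetTensorInfo(info);

        // The unary op is selected by the descriptor, exactly as in
        // HalPolicy::ConvertElementwiseUnary in the diff below.
        const armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);
        armnn::IConnectableLayer* rsqrt = network->AddElementwiseUnaryLayer(descriptor, "rsqrt");
        rsqrt->GetOutputSlot(0).SetTensorInfo(info);

        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(rsqrt->GetInputSlot(0));
        rsqrt->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }
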
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index ddd85d9b..61daeef5 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -85,7 +85,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
switch (operation.type)
{
case V1_2::OperationType::ABS:
- return ConvertAbs(operation, model, data);
+ return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
case V1_2::OperationType::ADD:
return ConvertAdd(operation, model, data);
case V1_2::OperationType::ARGMAX:
@@ -175,7 +175,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
case V1_2::OperationType::RSQRT:
- return ConvertRsqrt(operation, model, data);
+ return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
case V1_2::OperationType::SQRT:
return ConvertSqrt(operation, model, data);
case V1_2::OperationType::SQUEEZE:
@@ -202,12 +202,6 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
}
}
-bool HalPolicy::ConvertAbs(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertAbs()");
- return ::ConvertAbs<hal_1_2::HalPolicy>(operation, model, data);
-}
-
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
@@ -648,6 +642,59 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve
return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertElementwiseUnary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ UnaryOperation unaryOperation)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertElementwiseUnary()");
+ ALOGV("unaryOperation = %s", GetUnaryOperationAsCString(unaryOperation));
+
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+
+ if (!input.IsValid())
+ {
+ return Fail("%s: Operation has invalid input", __func__);
+ }
+
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const TensorInfo& inputInfo = input.GetTensorInfo();
+ const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
+ ElementwiseUnaryDescriptor descriptor(unaryOperation);
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsElementwiseUnarySupported,
+ data.m_Backends,
+ isSupported,
+ inputInfo,
+ outputInfo,
+ descriptor);
+
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
+ assert(layer != nullptr);
+
+ input.Connect(layer->GetInputSlot(0));
+
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertExpandDims()");
@@ -1935,48 +1982,6 @@ bool HalPolicy::ConvertResize(const Operation& operation,
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
-bool HalPolicy::ConvertRsqrt(const Operation& operation, const Model& model, ConversionData& data)
-{
- ALOGV("hal_1_2::HalPolicy::ConvertRsqrt()");
-
- LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
- if (!input.IsValid())
- {
- return Fail("%s: Operation has invalid input", __func__);
- }
-
- const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- return Fail("%s: Dynamic output tensors are not supported", __func__);
- }
-
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsRsqrtSupported,
- data.m_Backends,
- isSupported,
- input.GetTensorInfo(),
- outputInfo);
-
- if (!isSupported)
- {
- return false;
- }
-
- IConnectableLayer* const layer = data.m_Network->AddRsqrtLayer();
- assert(layer != nullptr);
- input.Connect(layer->GetInputSlot(0));
-
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
-}
-
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 1f0b54d7..e0a5c2fc 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -31,8 +31,6 @@ public:
static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
private:
- static bool ConvertAbs(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertArgMinMax(const Operation& operation,
@@ -63,6 +61,11 @@ private:
static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertElementwiseUnary(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::UnaryOperation unaryOperation);
+
static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
@@ -118,8 +121,6 @@ private:
ConversionData& data,
armnn::ResizeMethod resizeMethod);
- static bool ConvertRsqrt(const Operation& operation, const Model& model, ConversionData& data);
-
static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
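
The armnn::UnaryOperation parameter added above is ArmNN's unified unary-op enum, introduced by the companion ArmNN change (!armnn:2542). For reference, its membership at the time looked roughly like this; this is an excerpt reconstructed from armnn/Types.hpp, so treat order and exact contents as indicative:

    // armnn/Types.hpp (indicative excerpt). The HAL 1.2 policy wires up
    // Abs, Rsqrt and Neg, per the commit message above.
    enum class UnaryOperation
    {
        Abs,
        Exp,
        Sqrt,
        Rsqrt,
        Neg
    };
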
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index a0ab9e5a..f8622dd6 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -3591,50 +3591,4 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model,
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
-template<typename HalPolicy,
- typename HalOperation = typename HalPolicy::Operation,
- typename HalModel = typename HalPolicy::Model>
-bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
-{
- LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
-
- if (!input.IsValid())
- {
- return Fail("%s: Operation has invalid input", __func__);
- }
-
- using HalOperand = typename HalPolicy::Operand;
- const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output 0", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicTensor(outputInfo))
- {
- return Fail("%s: Dynamic output tensors are not supported", __func__);
- }
-
- bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
- IsAbsSupported,
- data.m_Backends,
- isSupported,
- input.GetTensorInfo(),
- outputInfo);
-
- if (!isSupported)
- {
- return false;
- }
-
- armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
- assert(layer != nullptr);
- input.Connect(layer->GetInputSlot(0));
-
- return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
-}
-
-
} // namespace armnn_driver
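
The block removed from ConversionUtils.hpp above used the header's standard policy-template idiom: shared converters are templated on a HalPolicy type that carries the HAL-version-specific Operation/Model types. A self-contained sketch of that idiom with hypothetical stand-in types; FakeHalPolicy and friends are illustrative, not the driver's:

    #include <iostream>

    // Stand-ins for the HAL types a real policy (e.g. hal_1_2::HalPolicy) exposes.
    struct FakeOperation { int type = 0; };
    struct FakeModel {};

    struct FakeHalPolicy
    {
        using Operation = FakeOperation;
        using Model     = FakeModel;
    };

    // One template definition serves every HAL version: the defaulted
    // template parameters pull the version-specific Operation/Model
    // types off the policy class.
    template<typename HalPolicy,
             typename HalOperation = typename HalPolicy::Operation,
             typename HalModel     = typename HalPolicy::Model>
    bool ConvertExample(const HalOperation& operation, const HalModel& /*model*/)
    {
        std::cout << "converting operation of type " << operation.type << "\n";
        return true;
    }

    int main()
    {
        FakeOperation operation;
        FakeModel model;
        return ConvertExample<FakeHalPolicy>(operation, model) ? 0 : 1;
    }
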