author     Sadik Armagan <sadik.armagan@arm.com>    2020-03-31 15:36:25 +0100
committer  Sadik Armagan <sadik.armagan@arm.com>    2020-03-31 15:36:25 +0100
commit     51ba2c6e0f83d2891a92c63fc30001de761e0789 (patch)
tree       db11303d0f2cda397ee9eda1361b0c1622d216a4
parent     a984702d6e44708eb7b2dbc678d47faf04769c67 (diff)
IVGCVSW-4407 HAL 1.3 Operators Support
* IVGCVSW-4441 Add support for ANEURALNETWORKS_ELU
* IVGCVSW-4443 Add support for ANEURALNETWORKS_HARD_SWISH
* IVGCVSW-4448 Add TENSOR_QUANT8_ASYMM_SIGNED data type support

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Idb9bb3f463b956221711423c15b6557eeb1af7db
-rw-r--r--  1.3/HalPolicy.cpp     22
-rw-r--r--  1.3/HalPolicy.hpp      4
-rw-r--r--  ConversionUtils.hpp    2
-rw-r--r--  NnapiSupport.txt       2
-rw-r--r--  Utils.cpp              3
5 files changed, 33 insertions(+), 0 deletions(-)
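
Note (not part of this patch): a minimal sketch of how an NNAPI client might emit the newly supported HARD_SWISH operation that this driver can now convert. The model handle, operand indices and tensor shape are illustrative placeholders and error checking is omitted.

#include <android/NeuralNetworks.h>

// Illustrative only: builds a single HARD_SWISH op on a float32 tensor.
// ANEURALNETWORKS_ELU is added the same way, with an extra float32 scalar input for alpha.
void AddHardSwish(ANeuralNetworksModel* model)
{
    uint32_t dims[2] = {1, 16};                         // placeholder shape

    ANeuralNetworksOperandType tensorType = {};
    tensorType.type           = ANEURALNETWORKS_TENSOR_FLOAT32;
    tensorType.dimensionCount = 2;
    tensorType.dimensions     = dims;

    ANeuralNetworksModel_addOperand(model, &tensorType);   // operand 0: input
    ANeuralNetworksModel_addOperand(model, &tensorType);   // operand 1: output

    uint32_t inputs[]  = {0};
    uint32_t outputs[] = {1};
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_HARD_SWISH,
                                      1, inputs, 1, outputs);
}
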
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 0de7573a..28d73197 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -45,6 +45,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertDequantize(operation, model, data);
case V1_3::OperationType::DIV:
return ConvertDiv(operation, model, data);
+ case V1_3::OperationType::ELU:
+ return ConvertElu(operation, model, data);
case V1_3::OperationType::EQUAL:
return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
case V1_3::OperationType::EXPAND_DIMS:
@@ -59,6 +61,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
case V1_3::OperationType::GROUPED_CONV_2D:
return ConvertGroupedConv2d(operation, model, data);
+ case V1_3::OperationType::HARD_SWISH:
+ return ConvertHardSwish(operation, model, data);
case V1_3::OperationType::INSTANCE_NORMALIZATION:
return ConvertInstanceNormalization(operation, model, data);
case V1_3::OperationType::L2_NORMALIZATION:
@@ -223,6 +227,15 @@ bool HalPolicy::ConvertElementwiseUnary(const Operation& operation,
return ::ConvertElementwiseUnary<hal_1_3::HalPolicy>(operation, model, data, unaryOperation);
}
+bool HalPolicy::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertElu()");
+ ActivationDescriptor desc;
+ desc.m_Function = ActivationFunction::Elu;
+
+ return ::ConvertToActivation<hal_1_3::HalPolicy>(operation, __func__, desc, model, data);
+}
+
bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertExpandDims()");
@@ -247,6 +260,15 @@ bool HalPolicy::ConvertGroupedConv2d(const Operation& operation, const Model& mo
return ::ConvertGroupedConv2d<hal_1_3::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_3::HalPolicy::ConvertHardSwish()");
+ ActivationDescriptor desc;
+ desc.m_Function = ActivationFunction::HardSwish;
+
+ return ::ConvertToActivation<hal_1_3::HalPolicy>(operation, __func__, desc, model, data);
+}
+
bool HalPolicy::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_3::HalPolicy::ConvertInstanceNormalization()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index f7771a6c..e3f21b1b 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -68,6 +68,8 @@ private:
ConversionData& data,
armnn::UnaryOperation unaryOperation);
+ static bool ConvertElu(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
@@ -76,6 +78,8 @@ private:
static bool ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 315089c2..8067e53b 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -207,9 +207,11 @@ inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
return type == V1_3::OperandType::BOOL ||
+ type == V1_3::OperandType::TENSOR_BOOL8 ||
type == V1_3::OperandType::TENSOR_FLOAT16 ||
type == V1_3::OperandType::TENSOR_FLOAT32 ||
type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
+ type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
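
Background (not part of this patch): TENSOR_QUANT8_ASYMM_SIGNED stores each element as a signed 8-bit value in [-128, 127] with a per-tensor scale and zeroPoint, and the real value is scale * (quantized - zeroPoint). A minimal helper for reference; the name is illustrative, not an existing driver function:

#include <cstdint>

inline float DequantizeQAsymmS8(int8_t quantized, float scale, int32_t zeroPoint)
{
    return scale * (static_cast<int32_t>(quantized) - zeroPoint);
}
// Example: scale = 0.5f, zeroPoint = -8, quantized = 4  =>  0.5 * (4 - (-8)) = 6.0f
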
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 84e643af..d5e077bf 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -25,6 +25,7 @@ DEPTH_TO_SPACE (FLOAT32, FLOAT16, QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32, QUANT8_ASYMM)
DEQUANTIZE (FLOAT32 (output only), QUANT8_ASYMM (input only))
DIV (FLOAT32, QUANT8_ASYMM)
+ELU (FLOAT32, QUANT8_ASYMM)
EQUAL (FLOAT32, QUANT8_ASYMM)
EXPAND_DIMS (FLOAT32, FLOAT16, QUANT8_ASYMM)
FLOOR (FLOAT32)
@@ -32,6 +33,7 @@ FULLY_CONNECTED (FLOAT32, QUANT8_ASYMM)
GREATER (FLOAT32, QUANT8_ASYMM)
GREATER_EQUAL (FLOAT32, QUANT8_ASYMM)
GROUPED_CONV_2D (FLOAT32, QUANT8_ASYMM)
+HARD_SWISH (FLOAT32, QUANT8_ASYMM)
INSTANCE_NORMALIZATION (FLOAT32)
L2_NORMALIZATION (FLOAT32)
L2_POOL_2D (FLOAT32, QUANT8_ASYMM)
diff --git a/Utils.cpp b/Utils.cpp
index 8a17b532..aeee800b 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -174,6 +174,9 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
DataType type;
switch (operand.type)
{
+ case V1_3::OperandType::TENSOR_BOOL8:
+ type = armnn::DataType::Boolean;
+ break;
case V1_3::OperandType::TENSOR_FLOAT32:
type = armnn::DataType::Float32;
break;