about summary refs log tree commit diff
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2019-07-30 09:43:18 +0100
committerSadik Armagan <sadik.armagan@arm.com>2019-07-30 08:48:58 +0000
commit5a476a88c3295168f5480b5861f59255b29d8433 (patch)
tree230393f3ea4135e7a6a880c8fc14714c15e81203
parent0a87936e7261fc074797111d274c40fca17d9509 (diff)
downloadandroid-nn-driver-5a476a88c3295168f5480b5861f59255b29d8433.tar.gz
IVGCVSW-3592 Add Support for Quantize to HAL 1.2 Driver
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ie7421078b2bdd16d7ac67b635953b34721e8c8fe
-rw-r--r--  1.2/HalPolicy.cpp  43
-rw-r--r--  1.2/HalPolicy.hpp  2
-rw-r--r--  NnapiSupport.txt   4
3 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8502640c..dee2175d 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -146,6 +146,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
return ConvertPrelu(operation, model, data);
+ case V1_2::OperationType::QUANTIZE:
+ return ConvertQuantize(operation, model, data);
case V1_2::OperationType::RELU:
return ConvertReLu(operation, model, data);
case V1_2::OperationType::RELU1:
@@ -751,6 +753,47 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
+// Converts an NNAPI QUANTIZE operation (HAL 1.2) into an ArmNN Quantize layer.
+// Validates the single input and single output operand, rejects dynamic output
+// tensors, asks the configured backends whether quantization is supported, and
+// only then adds the layer to the network and wires its input/output slots.
+// Returns false (via Fail, which also logs) on any validation failure.
+bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
+
+ // Input 0: the tensor to be quantized.
+ LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+ if (!input.IsValid())
+ {
+ return Fail("%s: Operation has invalid input", __func__);
+ }
+
+ // Output 0: destination operand; its TensorInfo carries the quantization
+ // parameters (scale/zero-point) the backend will quantize into.
+ const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!outputOperand)
+ {
+ return Fail("%s: Operation has invalid outputs", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+ // Shapes must be fully known at conversion time; dynamic output tensors
+ // are explicitly unsupported by this driver.
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
+ // Query backend capability before mutating the network, so an unsupported
+ // operation is cleanly rejected without leaving a partial layer behind.
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsQuantizeSupported,
+ data.m_Backends,
+ isSupported,
+ input.GetTensorInfo(),
+ outputInfo);
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+ // AddQuantizeLayer is not expected to fail once IsQuantizeSupported passed.
+ assert(layer != nullptr);
+ input.Connect(layer->GetInputSlot(0));
+
+ // Registers the layer's output slot against output operand 0 and sets its
+ // TensorInfo from the model.
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 285a37f1..a51b9a60 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -51,6 +51,8 @@ private:
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index a57768d7..47c5149a 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -52,7 +52,9 @@ CONV_2D (FLOAT32,QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
MAXIMUM (FLOAT32,QUANT8_ASYMM)
MINIMUM (FLOAT32,QUANT8_ASYMM)
+PAD_V2 (FLOAT32,QUANT8_ASYMM)
PRELU (FLOAT32,QUANT8_ASYMM)
+QUANTIZE (FLOAT32,QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
SOFTMAX (FLOAT32,QUANT8_ASYMM)
@@ -71,8 +73,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
CONCATENATION
LSTM
-PAD_V2
-QUANTIZE
QUANTIZED_16BIT_LSTM
TRANSPOSE_CONV_2D