From 5a476a88c3295168f5480b5861f59255b29d8433 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 30 Jul 2019 09:43:18 +0100
Subject: IVGCVSW-3592 Add Support for Quantize to HAL 1.2 Driver

Signed-off-by: Sadik Armagan
Change-Id: Ie7421078b2bdd16d7ac67b635953b34721e8c8fe
---
 1.2/HalPolicy.cpp | 43 +++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp |  2 ++
 NnapiSupport.txt  |  4 ++--
 3 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8502640c..dee2175d 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -146,6 +146,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
             return ConvertPrelu(operation, model, data);
+        case V1_2::OperationType::QUANTIZE:
+            return ConvertQuantize(operation, model, data);
         case V1_2::OperationType::RELU:
             return ConvertReLu(operation, model, data);
         case V1_2::OperationType::RELU1:
@@ -751,6 +753,47 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsQuantizeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
 bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 285a37f1..a51b9a60 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -51,6 +51,8 @@ private:
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
 
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index a57768d7..47c5149a 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -52,7 +52,9 @@ CONV_2D (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
 MAXIMUM (FLOAT32,QUANT8_ASYMM)
 MINIMUM (FLOAT32,QUANT8_ASYMM)
+PAD_V2 (FLOAT32,QUANT8_ASYMM)
 PRELU (FLOAT32,QUANT8_ASYMM)
+QUANTIZE (FLOAT32,QUANT8_ASYMM)
 RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
 SOFTMAX (FLOAT32,QUANT8_ASYMM)
 
@@ -71,8 +73,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
 
 CONCATENATION
 LSTM
-PAD_V2
-QUANTIZE
 QUANTIZED_16BIT_LSTM
 TRANSPOSE_CONV_2D
 
-- 
cgit v1.2.1