From 2e32961e568e8e99a65dd7726bffcd56dfb9f87e Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Wed, 24 Jun 2020 10:57:23 +0100
Subject: IVGCVSW-4623 Add android-nn-driver Support for FILL

* Added FILL operator support to HAL 1.3 Driver

!armnn:3447

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I271151857d805e8159b9dd675bd6e9e99e6ff69d
---
 1.3/HalPolicy.cpp       |  8 +++++
 1.3/HalPolicy.hpp       |  2 ++
 ConversionUtils_1_3.hpp | 86 +++++++++++++++++++++++++++++++++++++++++++++++++
 NnapiSupport.txt        |  1 +
 4 files changed, 97 insertions(+)

diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 707ef726..1c4a1e36 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -51,6 +51,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
         case V1_3::OperationType::EXPAND_DIMS:
             return ConvertExpandDims(operation, model, data);
+        case V1_3::OperationType::FILL:
+            return ConvertFill(operation, model, data);
         case V1_3::OperationType::FLOOR:
             return ConvertFloor(operation, model, data);
         case V1_3::OperationType::FULLY_CONNECTED:
@@ -243,6 +245,12 @@ bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model
     return ::ConvertExpandDims<hal_1_3::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertFill()");
+    return ::ConvertFill<hal_1_3::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertFloor()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index 024d3ff5..6df2ce2d 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -73,6 +73,8 @@ private:
 
     static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertFill(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index dada6704..3acb49a7 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -65,6 +65,92 @@ bool ConvertElu(const HalOperation& operation, const HalModel& model, Conversion
     return ::ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertFill(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    using HalOperand     = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // Determine data type of output tensor
+    HalOperandType outputType = output->type;
+    FillDescriptor descriptor;
+    // Read the scalar fill value
+    if (outputType == HalOperandType::TENSOR_FLOAT16)
+    {
+        Half value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT16, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else if (outputType == HalOperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT32, descriptor.m_Value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+    }
+    else if (outputType == HalOperandType::TENSOR_INT32)
+    {
+        int32_t value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFillSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 02013609..6969356e 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -29,6 +29,7 @@ DIV (FLOAT32, QUANT8_ASYMM)
 ELU (FLOAT32, QUANT8_ASYMM)
 EQUAL (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 EXPAND_DIMS (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+FILL (FLOAT32, FLOAT16, INT32)
 FLOOR (FLOAT32)
 FULLY_CONNECTED (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 GREATER (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-- 
cgit v1.2.1
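
Editor's note (not part of the patch): ConvertFill() above translates an NNAPI FILL
operation (input 0: a 1-D INT32 shape tensor; input 1: a scalar fill value) into an
ArmNN Fill layer. As a rough illustration of the graph the converter produces, here
is a minimal sketch built directly against the public ArmNN network API. AddFillLayer,
FillDescriptor and m_Value come from the diff itself; the BuildFillNetwork helper,
the tensor shapes and the fill value 1.5f are invented for this example.

    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    // Hypothetical helper, for illustration only.
    armnn::INetworkPtr BuildFillNetwork()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        // FILL input 0 is a 1-D INT32 tensor holding the desired output shape
        // (handled above by ConvertToLayerInputHandle).
        armnn::TensorInfo shapeInfo(armnn::TensorShape({2}), armnn::DataType::Signed32);
        armnn::IConnectableLayer* input = network->AddInputLayer(0);
        input->GetOutputSlot(0).SetTensorInfo(shapeInfo);

        // FILL input 1, the scalar fill value, becomes FillDescriptor::m_Value,
        // just as ConvertFill() does for a FLOAT32 output tensor.
        armnn::FillDescriptor descriptor;
        descriptor.m_Value = 1.5f; // assumed example value

        armnn::IConnectableLayer* fill = network->AddFillLayer(descriptor, "fill");
        input->GetOutputSlot(0).Connect(fill->GetInputSlot(0));

        // A static output shape, mirroring the dynamic-tensor check in ConvertFill().
        armnn::TensorInfo outputInfo(armnn::TensorShape({2, 3}), armnn::DataType::Float32);
        fill->GetOutputSlot(0).SetTensorInfo(outputInfo);

        armnn::IConnectableLayer* output = network->AddOutputLayer(0);
        fill->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }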