From 2e32961e568e8e99a65dd7726bffcd56dfb9f87e Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Wed, 24 Jun 2020 10:57:23 +0100
Subject: IVGCVSW-4623 Add android-nn-driver Support for FILL

* Added FILL operator support to HAL 1.3 Driver

!armnn:3447

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I271151857d805e8159b9dd675bd6e9e99e6ff69d
---
 ConversionUtils_1_3.hpp | 86 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

(limited to 'ConversionUtils_1_3.hpp')

diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index dada6704..3acb49a7 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -65,6 +65,92 @@ bool ConvertElu(const HalOperation& operation, const HalModel& model, Conversion
     return ::ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertFill(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    using HalOperand     = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // Determine data type of output tensor
+    HalOperandType outputType = output->type;
+    FillDescriptor descriptor;
+    // Read the scalar fill value
+    if (outputType == HalOperandType::TENSOR_FLOAT16)
+    {
+        Half value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT16, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else if (outputType == HalOperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT32, descriptor.m_Value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+    }
+    else if (outputType == HalOperandType::TENSOR_INT32)
+    {
+        int32_t value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFillSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
-- 
cgit v1.2.1