From 19fa0cc4fd4915451c3f56f6f4bdb51d8a5ac172 Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 19 Nov 2019 12:06:47 +0000
Subject: IVGCVSW-3697 Add Support for ANEURALNETWORKS_ARG[MAX|MIN] to HAL 1.2
 Driver

!armnn:2313

Signed-off-by: Francis Murtagh
Change-Id: I543136e4e2ef9aece1378d2642064cc585246645
---
 1.2/HalPolicy.cpp   | 13 +++++++++
 1.2/HalPolicy.hpp   |  5 ++++
 ConversionUtils.hpp | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 94 insertions(+)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index c8e29688..e6f8acbb 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -88,6 +88,10 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertAbs(operation, model, data);
         case V1_2::OperationType::ADD:
             return ConvertAdd(operation, model, data);
+        case V1_2::OperationType::ARGMAX:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
+        case V1_2::OperationType::ARGMIN:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
         case V1_2::OperationType::AVERAGE_POOL_2D:
             return ConvertAveragePool2d(operation, model, data);
         case V1_2::OperationType::BATCH_TO_SPACE_ND:
@@ -210,6 +214,15 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
     return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertArgMinMax(const V1_2::Operation& operation,
+                                 const V1_2::Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertArgMinMax()");
+    return ::ConvertArgMinMax<hal_1_2::HalPolicy>(operation, model, data, argMinMaxFunction);
+}
+
 bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d611102b..1f0b54d7 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,11 @@ private:
 
     static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertArgMinMax(const Operation& operation,
+                                 const Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction);
+
     static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 0637c2b5..a284a50a 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1561,6 +1561,82 @@ bool ConvertAdd(const Operation& operation, const Model& model, ConversionData&
     }
 }
 
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertArgMinMax(const Operation& operation,
+                      const Model& model,
+                      ConversionData& data,
+                      armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
+
+    using HalOperand = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+
+    if (!input0.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    int32_t axis;
+    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
+    int rank = static_cast<int>(inputInfo.GetNumDimensions());
+
+    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 3)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        return Fail("%s: Axis must be in range [-n, n)", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = argMinMaxFunction;
+    descriptor.m_Axis = axis;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsArgMinMaxSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo0,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+    assert(layer != nullptr);
+
+    input0.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
 template<typename HalPolicy,
          typename Operation = typename HalPolicy::Operation,
          typename Model     = typename HalPolicy::Model>
--
cgit v1.2.1
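For reference, the descriptor built by the new ConvertArgMinMax helper maps directly onto armnn's public INetwork API. Below is a minimal standalone sketch (not part of the patch) of an equivalent Input -> ArgMinMax -> Output network; the tensor shapes, layer names and axis value are illustrative assumptions rather than anything taken from the driver code above.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Build a tiny network containing just Input -> ArgMinMax -> Output.
    INetworkPtr network = INetwork::Create();

    // Rank-4 float input; reducing over axis 3 removes that dimension.
    // Under the [-n, n) rule validated in the patch, axis -1 would be equivalent.
    TensorInfo inputInfo({ 1, 2, 2, 4 }, DataType::Float32);
    TensorInfo outputInfo({ 1, 2, 2 }, DataType::Signed32);

    // Same two fields the driver sets in ConvertArgMinMax().
    ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = ArgMinMaxFunction::Max; // ANEURALNETWORKS_ARGMAX
    descriptor.m_Axis     = 3;

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* argMax = network->AddArgMinMaxLayer(descriptor, "argmax");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    argMax->GetOutputSlot(0).SetTensorInfo(outputInfo);

    input->GetOutputSlot(0).Connect(argMax->GetInputSlot(0));
    argMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}

The m_Function and m_Axis assignments mirror the two lines in the patch; the output tensor is declared as Signed32 because ARGMAX/ARGMIN in NNAPI return a TENSOR_INT32 of indices.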