From 95b1ef62568cc71b4bcdde6af02b26687d020a1e Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 15 Jul 2019 12:02:20 +0100
Subject: IVGCVSW-3306 Add Maximum support in HAL 1.2 driver

Signed-off-by: Narumol Prangnawarat
Change-Id: I94e410058ffc1ce39465498c12e10cb5669e7ed3
---
 1.2/HalPolicy.cpp | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp |  2 ++
 2 files changed, 49 insertions(+)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index d7452c68..9ccac9b4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -142,6 +142,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
             return ConvertDepthwiseConv2d(operation, model, data);
+        case V1_2::OperationType::MAXIMUM:
+            return ConvertMaximum(operation, model, data);
         case V1_2::OperationType::PAD_V2:
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
@@ -456,6 +458,51 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
 }
 
+bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outInfo))
+    {
+        ALOGD("Output shape not set, will infer from inputs");
+        outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
+    }
+
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsMaximumSupported,
+                                       data.m_Backends,
+                                       input0.GetTensorInfo(),
+                                       input1.GetTensorInfo(),
+                                       outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+    assert(layer != nullptr);
+    BroadcastTensor(input0, input1, layer, *data.m_Network);
+
+    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+                                                            0,
+                                                            *layer,
+                                                            model,
+                                                            data,
+                                                            armnn::Optional<armnn::TensorInfo>(outInfo));
+}
+
 bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 25aee820..d0d988f3 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,8 @@ private:
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
--
cgit v1.2.1