diff options
author:    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-07-15 12:02:20 +0100
committer: Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-07-15 16:40:01 +0000
commit | 95b1ef62568cc71b4bcdde6af02b26687d020a1e (patch) | |
tree | dcba796eb09f9fc75cb53b93c0bd5d601ee2f84c /1.2 | |
parent | 2b6e275e354e9b28a12573aad3bdd4f2a80d52bf (diff) | |
download | android-nn-driver-95b1ef62568cc71b4bcdde6af02b26687d020a1e.tar.gz |
IVGCVSW-3306 Add Maximum support in HAL 1.2 driver
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94e410058ffc1ce39465498c12e10cb5669e7ed3
Diffstat (limited to '1.2')
-rw-r--r--  1.2/HalPolicy.cpp  47
-rw-r--r--  1.2/HalPolicy.hpp   2
2 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp index d7452c68..9ccac9b4 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -142,6 +142,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertConv2d(operation, model, data); case V1_2::OperationType::DEPTHWISE_CONV_2D: return ConvertDepthwiseConv2d(operation, model, data); + case V1_2::OperationType::MAXIMUM: + return ConvertMaximum(operation, model, data); case V1_2::OperationType::PAD_V2: return ConvertPadV2(operation, model, data); case V1_2::OperationType::PRELU: @@ -456,6 +458,51 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data); } +bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data) +{ + LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); + LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data); + + if (!input0.IsValid() || !input1.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model); + if (!outputOperand) + { + return Fail("%s: Could not read output", __func__); + } + + const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand); + if (IsDynamicOutput(outInfo)) + { + ALOGD("Output shape not set, will infer from inputs"); + outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape())); + } + + if (!IsLayerSupportedForAnyBackend(__func__, + armnn::IsMaximumSupported, + data.m_Backends, + input0.GetTensorInfo(), + input1.GetTensorInfo(), + outInfo)) + { + return false; + } + + armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer(); + assert(layer != nullptr); + BroadcastTensor(input0, input1, layer, 
*data.m_Network); + + return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, + 0, + *layer, + model, + data, + armnn::Optional<armnn::TensorInfo>(outInfo)); +} + bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data) { LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data); diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp index 25aee820..d0d988f3 100644 --- a/1.2/HalPolicy.hpp +++ b/1.2/HalPolicy.hpp @@ -35,6 +35,8 @@ private: static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data); |