aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-07-15 12:02:20 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-07-15 16:40:01 +0000
commit95b1ef62568cc71b4bcdde6af02b26687d020a1e (patch)
treedcba796eb09f9fc75cb53b93c0bd5d601ee2f84c
parent2b6e275e354e9b28a12573aad3bdd4f2a80d52bf (diff)
downloadandroid-nn-driver-95b1ef62568cc71b4bcdde6af02b26687d020a1e.tar.gz
IVGCVSW-3306 Add Maximum support in HAL 1.2 driver
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94e410058ffc1ce39465498c12e10cb5669e7ed3
-rw-r--r--1.2/HalPolicy.cpp47
-rw-r--r--1.2/HalPolicy.hpp2
-rw-r--r--NnapiSupport.txt2
-rw-r--r--OutputShapeUtils.cpp6
-rw-r--r--OutputShapeUtils.hpp3
5 files changed, 59 insertions, 1 deletions
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index d7452c68..9ccac9b4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -142,6 +142,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertConv2d(operation, model, data);
case V1_2::OperationType::DEPTHWISE_CONV_2D:
return ConvertDepthwiseConv2d(operation, model, data);
+ case V1_2::OperationType::MAXIMUM:
+ return ConvertMaximum(operation, model, data);
case V1_2::OperationType::PAD_V2:
return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
@@ -456,6 +458,51 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
+bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
+
+ if (!input0.IsValid() || !input1.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!outputOperand)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+ if (IsDynamicOutput(outInfo))
+ {
+ ALOGD("Output shape not set, will infer from inputs");
+ outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
+ }
+
+ if (!IsLayerSupportedForAnyBackend(__func__,
+ armnn::IsMaximumSupported,
+ data.m_Backends,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outInfo))
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+ assert(layer != nullptr);
+ BroadcastTensor(input0, input1, layer, *data.m_Network);
+
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outInfo));
+}
+
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 25aee820..d0d988f3 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,8 @@ private:
static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 462b48e6..eabc534f 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -50,6 +50,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
CONV_2D (FLOAT32,QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
+MAXIMUM (FLOAT32,QUANT8_ASYMM)
PRELU (FLOAT32,QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
@@ -69,7 +70,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
CONCATENATION
LSTM
MINIMUM
-MAXIMUM
PAD_V2
QUANTIZE
QUANTIZED_16BIT_LSTM
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index 739038ac..b6cdb316 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -54,6 +54,12 @@ bool IsDynamicOutput(const TensorInfo& outputInfo)
return outputInfo.GetNumElements() == 0u;
}
+TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
+ const armnn::TensorShape& input1Shape)
+{
+ return CalculateMaxShape(input0Shape, input1Shape);
+}
+
TensorShape InferPadOutputShape(const TensorShape& inputShape,
const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index be255c28..dac4a193 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -12,6 +12,9 @@ namespace armnn_driver
bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
+armnn::TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
+ const armnn::TensorShape& input1Shape);
+
armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
const std::vector<std::pair<unsigned int, unsigned int>>& padList);