aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEllen Norris-Thompson <ellen.norris-thompson@arm.com>2019-07-11 17:27:37 +0100
committerEllen Norris-Thompson <ellen.norris-thompson@arm.com>2019-07-16 16:21:08 +0100
commit1cb29aa8471f4bf78f53d5cbf0b9dee7fa6b0615 (patch)
treebd73f2bcdce8b9bdfb37b16f6c39ff5aa36b8d08
parent92ec725cb80f6c04f4bc952422fe285f3f42d559 (diff)
downloadandroid-nn-driver-1cb29aa8471f4bf78f53d5cbf0b9dee7fa6b0615.tar.gz
IVGCVSW-3305 Support Minimum in HAL1.2 Driver
* Adds conversion method to HAL1.2 Policy

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: Ib282f32b2f5e899f1fb085bf615df5f984b6fd34
-rw-r--r--1.2/HalPolicy.cpp52
-rw-r--r--1.2/HalPolicy.hpp2
-rw-r--r--NnapiSupport.txt2
-rw-r--r--OutputShapeUtils.cpp6
-rw-r--r--OutputShapeUtils.hpp3
5 files changed, 64 insertions, 1 deletions
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 5b501dbc..7a52fcc4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -144,6 +144,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertDepthwiseConv2d(operation, model, data);
case V1_2::OperationType::MAXIMUM:
return ConvertMaximum(operation, model, data);
+ case V1_2::OperationType::MINIMUM:
+ return ConvertMinimum(operation, model, data);
case V1_2::OperationType::PAD_V2:
return ConvertPadV2(operation, model, data);
case V1_2::OperationType::PRELU:
@@ -551,6 +553,56 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
armnn::Optional<armnn::TensorInfo>(outInfo));
}
+bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
+{
+ LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+ LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
+
+ if (!input0.IsValid() || !input1.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
+ input1.GetTensorInfo().GetShape()));
+ }
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsMinimumSupported,
+ data.m_Backends,
+ isSupported,
+ input0.GetTensorInfo(),
+ input1.GetTensorInfo(),
+ outputInfo);
+
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+ assert(layer != nullptr);
+ BroadcastTensor(input0, input1, layer, *data.m_Network);
+
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d0d988f3..18cf0359 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -37,6 +37,8 @@ private:
static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
+
static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index eabc534f..8c7a814c 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -51,6 +51,7 @@ The following AndroidNN HAL 1.2 operations are currently supported:
CONV_2D (FLOAT32,QUANT8_ASYMM)
DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM)
MAXIMUM (FLOAT32,QUANT8_ASYMM)
+MINIMUM (FLOAT32,QUANT8_ASYMM)
PRELU (FLOAT32,QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
@@ -69,7 +70,6 @@ The following AndroidNN HAL 1.2 operations are currently not supported:
CONCATENATION
LSTM
-MINIMUM
PAD_V2
QUANTIZE
QUANTIZED_16BIT_LSTM
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index 6c936ee7..e3812a36 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -121,6 +121,12 @@ TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
return CalculateMaxShape(input0Shape, input1Shape);
}
+TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
+ const armnn::TensorShape& input1Shape)
+{
+ return CalculateMaxShape(input0Shape, input1Shape);
+}
+
TensorShape InferPadOutputShape(const TensorShape& inputShape,
const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 2a832618..7452ced9 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -23,6 +23,9 @@ armnn::TensorShape InferDepthwiseConvolution2dOutputShape(const armnn::TensorSha
armnn::TensorShape InferMaximumOutputShape(const armnn::TensorShape& input0Shape,
const armnn::TensorShape& input1Shape);
+armnn::TensorShape InferMinimumOutputShape(const armnn::TensorShape& input0Shape,
+ const armnn::TensorShape& input1Shape);
+
armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
const std::vector<std::pair<unsigned int, unsigned int>>& padList);