author    Sadik Armagan <sadik.armagan@arm.com>  2020-06-24 10:57:23 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2020-06-24 10:57:23 +0100
commit    2e32961e568e8e99a65dd7726bffcd56dfb9f87e (patch)
tree      386e409b98aa9d8fa92a59422d9a1bf39c2d836a
parent    0cc2b31e81886bbb671a029fdd3687c67543dd1d (diff)
download  android-nn-driver-2e32961e568e8e99a65dd7726bffcd56dfb9f87e.tar.gz
IVGCVSW-4623 Add android-nn-driver Support for FILL
* Added FILL operator support to HAL 1.3 Driver

!armnn:3447

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I271151857d805e8159b9dd675bd6e9e99e6ff69d
-rw-r--r--  1.3/HalPolicy.cpp        |  8
-rw-r--r--  1.3/HalPolicy.hpp        |  2
-rw-r--r--  ConversionUtils_1_3.hpp  | 86
-rw-r--r--  NnapiSupport.txt         |  1
4 files changed, 97 insertions(+), 0 deletions(-)
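
For context: the NNAPI FILL operation takes a 1-D INT32 tensor describing the
output shape as input 0 and a scalar fill value as input 1, and produces a
tensor of that shape filled with that value; the ConvertFill code below reads
its operands in exactly that order. The following is a minimal sketch, not
part of this patch, of a client model that exercises FILL through the
NeuralNetworks C API (the helper name BuildFillModel is illustrative; assumes
the NNAPI feature level 4 / API 30 headers):

    // Sketch only: builds a model computing FILL(shape = {2, 3}, value = 1.0f).
    // Both FILL inputs are set as compile-time constants, so the model's only
    // runtime operand is the filled output tensor.
    #include <android/NeuralNetworks.h>
    #include <cstdint>

    bool BuildFillModel(ANeuralNetworksModel* model)
    {
        // Operand 0: 1-D INT32 tensor holding the output shape.
        uint32_t shapeDims[] = {2};
        ANeuralNetworksOperandType shapeType = {};
        shapeType.type           = ANEURALNETWORKS_TENSOR_INT32;
        shapeType.dimensionCount = 1;
        shapeType.dimensions     = shapeDims;

        // Operand 1: scalar fill value; its type selects the output type.
        ANeuralNetworksOperandType valueType = {};
        valueType.type = ANEURALNETWORKS_FLOAT32;

        // Operand 2: the filled output tensor.
        uint32_t outDims[] = {2, 3};
        ANeuralNetworksOperandType outType = {};
        outType.type           = ANEURALNETWORKS_TENSOR_FLOAT32;
        outType.dimensionCount = 2;
        outType.dimensions     = outDims;

        if (ANeuralNetworksModel_addOperand(model, &shapeType) != ANEURALNETWORKS_NO_ERROR ||
            ANeuralNetworksModel_addOperand(model, &valueType) != ANEURALNETWORKS_NO_ERROR ||
            ANeuralNetworksModel_addOperand(model, &outType)   != ANEURALNETWORKS_NO_ERROR)
        {
            return false;
        }

        const int32_t shapeData[] = {2, 3};
        const float   fillValue   = 1.0f;
        ANeuralNetworksModel_setOperandValue(model, 0, shapeData, sizeof(shapeData));
        ANeuralNetworksModel_setOperandValue(model, 1, &fillValue, sizeof(fillValue));

        const uint32_t inputs[]  = {0, 1};
        const uint32_t outputs[] = {2};
        if (ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_FILL,
                                              2, inputs, 1, outputs) != ANEURALNETWORKS_NO_ERROR)
        {
            return false;
        }

        // No runtime inputs: both FILL operands were given constant values.
        ANeuralNetworksModel_identifyInputsAndOutputs(model, 0, nullptr, 1, outputs);
        return ANeuralNetworksModel_finish(model) == ANEURALNETWORKS_NO_ERROR;
    }
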
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 707ef726..1c4a1e36 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -51,6 +51,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
         case V1_3::OperationType::EXPAND_DIMS:
             return ConvertExpandDims(operation, model, data);
+        case V1_3::OperationType::FILL:
+            return ConvertFill(operation, model, data);
         case V1_3::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
         case V1_3::OperationType::FULLY_CONNECTED:
@@ -243,6 +245,12 @@ bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model
     return ::ConvertExpandDims<hal_1_3::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertFill()");
+    return ::ConvertFill<hal_1_3::HalPolicy>(operation, model, data);
+}
+
 bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertFloor()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index 024d3ff5..6df2ce2d 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -73,6 +73,8 @@ private:
 
     static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertFill(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index dada6704..3acb49a7 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -66,6 +66,92 @@ bool ConvertElu(const HalOperation& operation, const HalModel& model, Conversion
 }
 
 template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel = typename HalPolicy::Model>
+bool ConvertFill(const HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    using HalOperand     = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // Determine data type of output tensor
+    HalOperandType outputType = output->type;
+    FillDescriptor descriptor;
+    // Read the scalar fill value
+    if (outputType == HalOperandType::TENSOR_FLOAT16)
+    {
+        Half value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT16, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else if (outputType == HalOperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::FLOAT32, descriptor.m_Value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+    }
+    else if (outputType == HalOperandType::TENSOR_INT32)
+    {
+        int32_t value;
+
+        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFillSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel = typename HalPolicy::Model>
 bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
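
ConvertFill above follows the driver's standard conversion pattern: validate
the input and output operands, populate an ArmNN FillDescriptor from the
scalar operand, query the backends via FORWARD_LAYER_SUPPORT_FUNC, then add
and wire up the layer. A minimal sketch of the graph fragment it produces,
using the public armnn API (the helper name AddFill is illustrative, not part
of this patch):

    #include <armnn/ArmNN.hpp>

    // Sketch: the scalar lives on FillDescriptor::m_Value, which is a float.
    // That is why the FLOAT16 and INT32 paths above cast their value to float
    // before storing it on the descriptor.
    armnn::IConnectableLayer* AddFill(armnn::INetwork& network, float fillValue)
    {
        armnn::FillDescriptor descriptor;
        descriptor.m_Value = fillValue;

        // Input slot 0 takes the 1-D shape tensor; output slot 0 carries the
        // filled tensor, whose TensorInfo the converter sets explicitly.
        return network.AddFillLayer(descriptor, "fill");
    }
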
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 02013609..6969356e 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -29,6 +29,7 @@ DIV (FLOAT32, QUANT8_ASYMM)
 ELU (FLOAT32, QUANT8_ASYMM)
 EQUAL (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 EXPAND_DIMS (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+FILL (FLOAT32, FLOAT16, INT32)
 FLOOR (FLOAT32)
 FULLY_CONNECTED (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
 GREATER (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
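
With this change the ArmNN driver can claim FILL models such as the
BuildFillModel sketch above instead of leaving them to the NNAPI CPU fallback.
Compiling and running that model uses the standard NNAPI flow; again a hedged
sketch, not part of this patch:

    #include <android/NeuralNetworks.h>

    // Sketch: compile and run the model from BuildFillModel() above.
    bool RunFillModel(ANeuralNetworksModel* model)
    {
        ANeuralNetworksCompilation* compilation = nullptr;
        if (ANeuralNetworksCompilation_create(model, &compilation) != ANEURALNETWORKS_NO_ERROR ||
            ANeuralNetworksCompilation_finish(compilation) != ANEURALNETWORKS_NO_ERROR)
        {
            return false;
        }

        ANeuralNetworksExecution* execution = nullptr;
        ANeuralNetworksExecution_create(compilation, &execution);

        float out[6] = {};  // 2 x 3 output; expected to hold six 1.0f values
        ANeuralNetworksExecution_setOutput(execution, 0, nullptr, out, sizeof(out));
        bool ok = ANeuralNetworksExecution_compute(execution) == ANEURALNETWORKS_NO_ERROR;

        ANeuralNetworksExecution_free(execution);
        ANeuralNetworksCompilation_free(compilation);
        return ok;
    }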