author    Francis Murtagh <francis.murtagh@arm.com>  2019-11-19 12:06:47 +0000
committer Kevin May <kevin.may@arm.com>              2019-11-19 18:34:35 +0000
commit    a23334e1aa1ffc524ce016edf6250482d1f5330d (patch)
tree      62383c52829cac3f53d6aeab69715061062abb6e
parent    d7de165909d24c7ece4a151890a7219b93a0528c (diff)
download  android-nn-driver-a23334e1aa1ffc524ce016edf6250482d1f5330d.tar.gz
IVGCVSW-3697 Add Support for ANEURALNETWORKS_ARG[MAX|MIN] to HAL 1.2 Driver
!armnn:2313

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I543136e4e2ef9aece1378d2642064cc585246645
-rw-r--r--  1.2/HalPolicy.cpp    13
-rw-r--r--  1.2/HalPolicy.hpp     5
-rw-r--r--  ConversionUtils.hpp  76
3 files changed, 94 insertions(+), 0 deletions(-)
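For context (not part of the commit itself): with this change, an NNAPI model containing ANEURALNETWORKS_ARGMAX or ANEURALNETWORKS_ARGMIN can be handed to the Arm NN HAL 1.2 driver. The following is a minimal sketch of how such an operation is typically added to a model through the NNAPI C API; shapes, operand indices and variable names are illustrative only, not taken from the commit.

#include <android/NeuralNetworks.h>

// Sketch only: build a one-operation model containing ANEURALNETWORKS_ARGMAX.
// Error handling is reduced to a single final check.
ANeuralNetworksModel* model = nullptr;
ANeuralNetworksModel_create(&model);

uint32_t inputDims[] = {1, 2, 2, 4};
ANeuralNetworksOperandType inputType  = {ANEURALNETWORKS_TENSOR_FLOAT32, 4, inputDims, 0.0f, 0};
ANeuralNetworksOperandType axisType   = {ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};
uint32_t outputDims[] = {1, 2, 2};               // the reduced axis disappears from the output
ANeuralNetworksOperandType outputType = {ANEURALNETWORKS_TENSOR_INT32, 3, outputDims, 0.0f, 0};

ANeuralNetworksModel_addOperand(model, &inputType);   // operand 0: input tensor
ANeuralNetworksModel_addOperand(model, &axisType);    // operand 1: axis scalar
ANeuralNetworksModel_addOperand(model, &outputType);  // operand 2: output index tensor

int32_t axis = -1;                                     // negative axis counts from the end
ANeuralNetworksModel_setOperandValue(model, 1, &axis, sizeof(axis));

uint32_t opInputs[]  = {0, 1};
uint32_t opOutputs[] = {2};
ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ARGMAX, 2, opInputs, 1, opOutputs);

uint32_t modelInputs[]  = {0};
uint32_t modelOutputs[] = {2};
ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, modelInputs, 1, modelOutputs);

if (ANeuralNetworksModel_finish(model) != ANEURALNETWORKS_NO_ERROR)
{
    // handle error
}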
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index c8e29688..e6f8acbb 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -88,6 +88,10 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
return ConvertAbs(operation, model, data);
case V1_2::OperationType::ADD:
return ConvertAdd(operation, model, data);
+ case V1_2::OperationType::ARGMAX:
+ return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
+ case V1_2::OperationType::ARGMIN:
+ return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
case V1_2::OperationType::AVERAGE_POOL_2D:
return ConvertAveragePool2d(operation, model, data);
case V1_2::OperationType::BATCH_TO_SPACE_ND:
@@ -210,6 +214,15 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve
return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
}
+bool HalPolicy::ConvertArgMinMax(const V1_2::Operation& operation,
+ const V1_2::Model& model,
+ ConversionData& data,
+ armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+ ALOGV("hal_1_2::HalPolicy::ConvertArgMinMax()");
+ return ::ConvertArgMinMax<hal_1_2::HalPolicy>(operation, model, data, argMinMaxFunction);
+}
+
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index d611102b..1f0b54d7 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,11 @@ private:
static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
+ static bool ConvertArgMinMax(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::ArgMinMaxFunction argMinMaxFunction);
+
static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 0637c2b5..a284a50a 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1564,6 +1564,82 @@ bool ConvertAdd(const Operation& operation, const Model& model, ConversionData&
template<typename HalPolicy,
typename Operation = typename HalPolicy::Operation,
typename Model = typename HalPolicy::Model>
+bool ConvertArgMinMax(const Operation& operation,
+ const Model& model,
+ ConversionData& data,
+ armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+ ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
+
+ using HalOperand = typename HalPolicy::Operand;
+ using HalOperandType = typename HalPolicy::OperandType;
+
+ LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+
+ if (!input0.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ int32_t axis;
+ if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
+ {
+ return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
+ int rank = static_cast<int>(inputInfo.GetNumDimensions());
+
+ if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
+ {
+ // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+ // E.g. Rank 4 tensor can have axis in range [-4, 3)
+ // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+ return Fail("%s: Axis must be in range [-n, n)", __func__);
+ }
+
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output 0", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ return Fail("%s: Dynamic output tensors are not supported", __func__);
+ }
+
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Function = argMinMaxFunction;
+ descriptor.m_Axis = axis;
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsArgMinMaxSupported,
+ data.m_Backends,
+ isSupported,
+ inputInfo0,
+ outputInfo,
+ descriptor);
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+ assert(layer != nullptr);
+
+ input0.Connect(layer->GetInputSlot(0));
+
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+ typename Operation = typename HalPolicy::Operation,
+ typename Model = typename HalPolicy::Model>
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
using HalOperand = typename HalPolicy::Operand;
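As a side note (not part of the commit): the axis check in ConvertArgMinMax accepts any value in [-n, n), matching the NNAPI convention that a negative axis counts back from the last dimension, so for a rank 4 input an axis of -1 refers to dimension 3. Below is a minimal standalone sketch, built directly against the public Arm NN graph API, of roughly the layer the converter adds for ARGMAX; the shapes, layer names and binding ids are illustrative, not taken from the driver.

#include <armnn/ArmNN.hpp>

// Sketch only: Input -> ArgMinMax(Max, axis 3) -> Output, using the public Arm NN API.
armnn::INetworkPtr network = armnn::INetwork::Create();

armnn::TensorInfo inputInfo(armnn::TensorShape({1, 2, 2, 4}), armnn::DataType::Float32);
armnn::TensorInfo outputInfo(armnn::TensorShape({1, 2, 2}), armnn::DataType::Signed32);

armnn::ArgMinMaxDescriptor descriptor;
descriptor.m_Function = armnn::ArgMinMaxFunction::Max;
descriptor.m_Axis     = 3; // last dimension of the rank 4 input (NNAPI would also accept -1 here)

armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0);
armnn::IConnectableLayer* argMaxLayer = network->AddArgMinMaxLayer(descriptor, "ArgMax");
armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0);

inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
argMaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

inputLayer->GetOutputSlot(0).Connect(argMaxLayer->GetInputSlot(0));
argMaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));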