From 89cbb3a914d76843630f3576108e309cf085ef80 Mon Sep 17 00:00:00 2001
From: Teresa Charlin <teresa.charlinreyes@arm.com>
Date: Thu, 11 Feb 2021 21:00:47 +0000
Subject: IVGCVSW-5131 Add support for REDUCE_SUM, REDUCE_MIN and REDUCE_MAX

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib88544dd6adc3d6a0d2f4c0b395e05b89bc8ad3e
---
 1.2/HalPolicy.cpp       | 15 ++++++---
 1.2/HalPolicy.hpp       |  3 +-
 1.3/HalPolicy.cpp       | 15 ++++++---
 1.3/HalPolicy.hpp       |  3 +-
 ConversionUtils_1_2.hpp | 86 +++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 112 insertions(+), 10 deletions(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 557b78cf..7d45688a 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -86,7 +86,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
         case V1_2::OperationType::MAXIMUM:
             return ConvertMaximum(operation, model, data);
         case V1_2::OperationType::MEAN:
-            return ConvertMean(operation, model, data);
+            return ConvertReduce(operation, model, data, ReduceOperation::Mean);
         case V1_2::OperationType::MINIMUM:
             return ConvertMinimum(operation, model, data);
         case V1_2::OperationType::MUL:
@@ -105,6 +105,12 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertQuantize(operation, model, data);
         case V1_2::OperationType::QUANTIZED_16BIT_LSTM:
             return ConvertQuantized16BitLstm(operation, model, data);
+        case V1_2::OperationType::REDUCE_MAX:
+            return ConvertReduce(operation, model, data, ReduceOperation::Max);
+        case V1_2::OperationType::REDUCE_MIN:
+            return ConvertReduce(operation, model, data, ReduceOperation::Min);
+        case V1_2::OperationType::REDUCE_SUM:
+            return ConvertReduce(operation, model, data, ReduceOperation::Sum);
         case V1_2::OperationType::RELU:
             return ConvertReLu(operation, model, data);
         case V1_2::OperationType::RELU1:
@@ -306,10 +312,11 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
     return ::ConvertMaximum<hal_1_2::HalPolicy>(operation, model, data);
 }
 
-bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertReduce(const Operation& operation, const Model& model, ConversionData& data,
+                              ReduceOperation reduceOperation)
 {
-    ALOGV("hal_1_2::HalPolicy::ConvertMean()");
-    return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
+    ALOGV("hal_1_2::HalPolicy::ConvertReduce()");
+    return ::ConvertReduce<hal_1_2::HalPolicy>(operation, model, data, reduceOperation);
 }
 
 bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index be02c22f..feb2ba20 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -98,7 +98,8 @@ private:
 
     static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
+    static bool ConvertReduce(const Operation& operation, const Model& model, ConversionData& data,
+                              ReduceOperation reduce_operation);
 
     static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
 
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 93337594..9a9f2ac0 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -98,7 +98,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
         case V1_3::OperationType::MAXIMUM:
             return ConvertMaximum(operation, model, data);
         case V1_3::OperationType::MEAN:
-            return ConvertMean(operation, model, data);
+            return ConvertReduce(operation, model, data, ReduceOperation::Mean);
         case V1_3::OperationType::MINIMUM:
             return ConvertMinimum(operation, model, data);
         case V1_3::OperationType::MUL:
@@ -121,6 +121,12 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertQuantized16BitLstm(operation, model, data);
         case V1_3::OperationType::RANK:
             return ConvertRank(operation, model, data);
+        case V1_3::OperationType::REDUCE_MAX:
+            return ConvertReduce(operation, model, data, ReduceOperation::Max);
+        case V1_3::OperationType::REDUCE_MIN:
+            return ConvertReduce(operation, model, data, ReduceOperation::Min);
+        case V1_3::OperationType::REDUCE_SUM:
+            return ConvertReduce(operation, model, data, ReduceOperation::Sum);
         case V1_3::OperationType::RELU:
             return ConvertReLu(operation, model, data);
         case V1_3::OperationType::RELU1:
@@ -359,10 +365,11 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
     return ::ConvertMaximum<hal_1_3::HalPolicy>(operation, model, data);
 }
 
-bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertReduce(const Operation& operation, const Model& model, ConversionData& data,
+                              ReduceOperation reduceOperation)
 {
-    ALOGV("hal_1_3::HalPolicy::ConvertMean()");
-    return ::ConvertMean<hal_1_3::HalPolicy>(operation, model, data);
+    ALOGV("hal_1_3::HalPolicy::ConvertReduce()");
+    return ::ConvertReduce<hal_1_3::HalPolicy>(operation, model, data, reduceOperation);
 }
 
 bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index f82a5ef7..1337bf20 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -110,7 +110,8 @@ private:
 
     static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
 
-    static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
+    static bool ConvertReduce(const Operation& operation, const Model& model, ConversionData& data,
+                              ReduceOperation reduceOperation);
 
     static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
 
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index 779d88f4..080b8264 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1934,6 +1934,92 @@ bool ConvertQuantized16BitLstm(const HalOperation& operation, const HalModel& mo
 
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertReduce(const HalOperation& operation,
+                   const HalModel& model,
+                   ConversionData& data,
+                   ReduceOperation reduceOperation)
+{
+    using HalOperand = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    armnn::ReduceDescriptor descriptor;
+    descriptor.m_ReduceOperation = reduceOperation;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    // Convert the axis to unsigned int and remove duplicates.
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
+    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
+
+    // Get the "keep dims" flag.
+    if (!GetInputScalar<HalPolicy>(operation, 2, HalOperandType::BOOL, descriptor.m_KeepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReduceSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
-- 
cgit v1.2.1