From 89cbb3a914d76843630f3576108e309cf085ef80 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 11 Feb 2021 21:00:47 +0000
Subject: IVGCVSW-5131 Add support for REDUCE_SUM, REDUCE_MIN and REDUCE_MAX

Signed-off-by: Teresa Charlin
Change-Id: Ib88544dd6adc3d6a0d2f4c0b395e05b89bc8ad3e
---
 ConversionUtils_1_2.hpp | 86 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index 779d88f4..080b8264 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1934,6 +1934,92 @@ bool ConvertQuantized16BitLstm(const HalOperation& operation, const HalModel& mo
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertReduce(const HalOperation& operation,
+                   const HalModel& model,
+                   ConversionData& data,
+                   ReduceOperation reduceOperation)
+{
+    using HalOperand     = typename HalPolicy::Operand;
+    using HalOperandType = typename HalPolicy::OperandType;
+
+    armnn::ReduceDescriptor descriptor;
+    descriptor.m_ReduceOperation = reduceOperation;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    // Convert the axis to unsigned int and remove duplicates.
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
+    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
+
+    // Get the "keep dims" flag.
+    if (!GetInputScalar<HalPolicy>(operation, 2, HalOperandType::BOOL, descriptor.m_KeepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReduceSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
 
 template<typename HalPolicy,
--
cgit v1.2.1
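
Note: the commit subject covers REDUCE_SUM, REDUCE_MIN and REDUCE_MAX, and all three are meant to funnel through the single ConvertReduce template above, which only differs per operation in the armnn::ReduceOperation value written into the ReduceDescriptor. As a rough sketch of how a HAL 1.2 policy could dispatch to it (the wrapper names, signatures and the hal_1_2::HalPolicy instantiation below are illustrative assumptions, not taken from this patch):

    // Hypothetical per-operation wrappers (illustrative only): each forwards to the
    // shared ConvertReduce<HalPolicy> helper with the matching armnn::ReduceOperation.
    bool ConvertReduceMax(const V1_2::Operation& operation, const V1_2::Model& model, ConversionData& data)
    {
        return ConvertReduce<hal_1_2::HalPolicy>(operation, model, data, armnn::ReduceOperation::Max);
    }

    bool ConvertReduceMin(const V1_2::Operation& operation, const V1_2::Model& model, ConversionData& data)
    {
        return ConvertReduce<hal_1_2::HalPolicy>(operation, model, data, armnn::ReduceOperation::Min);
    }

    bool ConvertReduceSum(const V1_2::Operation& operation, const V1_2::Model& model, ConversionData& data)
    {
        return ConvertReduce<hal_1_2::HalPolicy>(operation, model, data, armnn::ReduceOperation::Sum);
    }

Routing all three operations through one helper keeps the shared work in one place: folding negative axes into range with (i + rank) % rank, de-duplicating them via the std::set, reading the keep-dims flag, and validating the layer against the backends with IsReduceSupported.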