diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2021-02-11 21:00:47 +0000 |
---|---|---|
committer | TeresaARM <teresa.charlinreyes@arm.com> | 2021-02-24 11:19:28 +0000 |
commit | 89cbb3a914d76843630f3576108e309cf085ef80 (patch) | |
tree | 7263f45b229b1f51f9f7d4bfe8aa5d5ce08f5128 /ConversionUtils_1_2.hpp | |
parent | 23ed7bfb9b401d353ab2108029b3a13d1645affe (diff) | |
download | android-nn-driver-89cbb3a914d76843630f3576108e309cf085ef80.tar.gz |
IVGCVSW-5131 Add support for REDUCE_SUM, REDUCE_MIN and REDUCE_MAX
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib88544dd6adc3d6a0d2f4c0b395e05b89bc8ad3e
Diffstat (limited to 'ConversionUtils_1_2.hpp')
-rw-r--r-- | ConversionUtils_1_2.hpp | 86 |
1 file changed, 86 insertions(+), 0 deletions(-)
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp index 779d88f4..080b8264 100644 --- a/ConversionUtils_1_2.hpp +++ b/ConversionUtils_1_2.hpp @@ -1937,6 +1937,92 @@ bool ConvertQuantized16BitLstm(const HalOperation& operation, const HalModel& mo template<typename HalPolicy, typename HalOperation = typename HalPolicy::Operation, typename HalModel = typename HalPolicy::Model> +bool ConvertReduce(const HalOperation& operation, + const HalModel& model, + ConversionData& data, + ReduceOperation reduceOperation) +{ + using HalOperand = typename HalPolicy::Operand; + using HalOperandType = typename HalPolicy::OperandType; + + armnn::ReduceDescriptor descriptor; + descriptor.m_ReduceOperation = reduceOperation; + + LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + + const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model); + if (!axisOperand) + { + return Fail("%s: Could not read input 1", __func__); + } + std::vector<int32_t> axis; + if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data)) + { + return Fail("%s: Input 1 has invalid values", __func__); + } + + // Convert the axis to unsigned int and remove duplicates. + unsigned int rank = inputInfo.GetNumDimensions(); + std::set<unsigned int> uniqueAxis; + std::transform(axis.begin(), axis.end(), + std::inserter(uniqueAxis, uniqueAxis.begin()), + [rank](int i) -> unsigned int { return (i + rank) % rank; }); + descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end()); + + // Get the "keep dims" flag. 
+ if (!GetInputScalar<HalPolicy>(operation, 2, HalOperandType::BOOL, descriptor.m_KeepDims, model, data)) + { + return Fail("%s: Could not read input 2", __func__); + } + + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsReduceSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc); +} + +template<typename HalPolicy, + typename HalOperation = typename HalPolicy::Operation, + typename HalModel = typename HalPolicy::Model> bool ConvertResize(const HalOperation& operation, const HalModel& model, ConversionData& data, |