diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-09 10:28:54 +0000 |
---|---|---|
committer | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-09 10:31:14 +0000 |
commit | a2747487fbe7eb6d9f5357c6d16c32355ed6e01c (patch) | |
tree | 6f6f8b38100d16f1ec8a0e5be71e8e6ae1cc600a /delegate/src/Reduce.hpp | |
parent | ac001eebca101f2df4973d2f1d8cfca026e07419 (diff) | |
download | armnn-a2747487fbe7eb6d9f5357c6d16c32355ed6e01c.tar.gz |
MLCE-347 'REDUCE_MIN, REDUCE_MAX, REDUCE_SUM Support'
* Added TfLiteParser support for REDUCE_MIN and REDUCE_MAX operators
* Added ACL workloads support for REDUCE_MIN, REDUCE_MAX, and REDUCE_SUM operators
* Added TfLite Delegate support for REDUCE_MIN, REDUCE_MAX, and REDUCE_SUM operators
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I8085d59946bfd4ab78a59a61f899031ae53371a8
Diffstat (limited to 'delegate/src/Reduce.hpp')
-rw-r--r-- | delegate/src/Reduce.hpp | 133 |
1 file changed, 133 insertions, 0 deletions
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp new file mode 100644 index 0000000000..13a11d3e61 --- /dev/null +++ b/delegate/src/Reduce.hpp @@ -0,0 +1,133 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <tensorflow/lite/builtin_ops.h> +#include <tensorflow/lite/c/builtin_op_data.h> +#include <tensorflow/lite/c/common.h> +#include <tensorflow/lite/kernels/internal/tensor_ctypes.h> +#include <tensorflow/lite/minimal_logging.h> + +namespace armnnDelegate +{ + +TfLiteStatus VisitReduceOperator(DelegateData& delegateData, + TfLiteContext* tfLiteContext, + TfLiteNode* tfLiteNode, + int nodeIndex, + int32_t reduceOperatorCode) +{ + TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex)); + TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + + const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; + const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteInputTensor, reduceOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteOutputTensor, reduceOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); + const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); + + // Get const axis value from model and set it to descriptor. 
+ const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]]; + if (!IsValid(tfLiteContext, tfLiteAxisTensor, reduceOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor); + auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor); + + std::vector<int32_t> axis; + // Add axis data to vector to be converter to unsigned int and assigned to descriptor axis. + if (axisTensorData != nullptr) + { + for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i) + { + axis.emplace_back(axisTensorData[i]); + } + } + else + { + for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); ++i) + { + axis.push_back(i); + } + } + + // Convert the axis to unsigned int and remove duplicates. + unsigned int rank = inputTensorInfo.GetNumDimensions(); + std::set<unsigned int> uniqueAxis; + std::transform(axis.begin(), + axis.end(), + std::inserter(uniqueAxis, uniqueAxis.begin()), + [rank](int i)->unsigned int{ return (i + rank) % rank; }); + + armnn::ReduceDescriptor desc; + desc.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end()); + + auto* reducerParameters = reinterpret_cast<TfLiteReducerParams*>(tfLiteNode->builtin_data); + desc.m_KeepDims = reducerParameters->keep_dims; + if (reduceOperatorCode == kTfLiteBuiltinReduceMax) + { + desc.m_ReduceOperation = armnn::ReduceOperation::Max; + } + else if (reduceOperatorCode == kTfLiteBuiltinReduceMin) + { + desc.m_ReduceOperation = armnn::ReduceOperation::Min; + } + else if (reduceOperatorCode == kTfLiteBuiltinSum) + { + desc.m_ReduceOperation = armnn::ReduceOperation::Sum; + } + else + { + TF_LITE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnDelegate: Unsupported Reduction Operator #%d node #%d: ", + reduceOperatorCode, nodeIndex); + return kTfLiteError; + } + + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) + { + 
FORWARD_LAYER_SUPPORT_FUNC(__func__, + tfLiteContext, + IsReduceSupported, + delegateData.m_Backends, + isSupported, + inputTensorInfo, + outInfo, + desc); + }; + + if (!delegateData.m_Network) + { + validateFunc(outputTensorInfo, isSupported); + return isSupported ? kTfLiteOk : kTfLiteError; + } + + // Add an Reduce layer + armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc); + ARMNN_ASSERT(layer != nullptr); + + armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); + outputSlot.SetTensorInfo(outputTensorInfo); + + // Connect + return Connect(layer, tfLiteNode, delegateData); +} + +} // namespace armnnDelegate |