diff options
author | James Ward <james.ward@arm.com> | 2020-11-13 18:05:04 +0000 |
---|---|---|
committer | Jim Flynn <jim.flynn@arm.com> | 2020-11-17 15:36:19 +0000 |
commit | a8578103d1fe621f97ff2cfd842a8e33c1b652c8 (patch) | |
tree | 071986839c5d6aa865fdd4212a83c9c684ef5231 /delegate/src/Softmax.hpp | |
parent | 0cf84423f440aa2cad4b3e5f678d7a5f5b865eb4 (diff) | |
download | armnn-a8578103d1fe621f97ff2cfd842a8e33c1b652c8.tar.gz |
IVGCVSW-5395 TfLiteDelegate: Implement the Softmax operators
Signed-off-by: James Ward <james.ward@arm.com>
Change-Id: I9f098c6b62ebb08e727aa8547e08bddc0b814705
Diffstat (limited to 'delegate/src/Softmax.hpp')
-rw-r--r-- | delegate/src/Softmax.hpp | 128 |
1 file changed, 121 insertions(+), 7 deletions(-)
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp index ddadbc73c8..0de8e1438c 100644 --- a/delegate/src/Softmax.hpp +++ b/delegate/src/Softmax.hpp @@ -5,7 +5,7 @@ #pragma once -#include <armnn/utility/IgnoreUnused.hpp> +#include "DelegateUtils.hpp" #include <tensorflow/lite/builtin_ops.h> #include <tensorflow/lite/c/builtin_op_data.h> @@ -15,19 +15,133 @@ namespace armnnDelegate { +TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData, + TfLiteContext* tfLiteContext, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputTensorInfo, + const armnn::SoftmaxDescriptor& descriptor) +{ + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + tfLiteContext, + IsSoftmaxSupported, + delegateData.m_Backends, + isSupported, + inputInfo, + outputTensorInfo, + descriptor); + return isSupported ? kTfLiteOk : kTfLiteError; +} + + +TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData, + TfLiteContext* tfLiteContext, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputTensorInfo, + const armnn::LogSoftmaxDescriptor& descriptor) +{ + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + tfLiteContext, + IsLogSoftmaxSupported, + delegateData.m_Backends, + isSupported, + inputInfo, + outputTensorInfo, + descriptor); + return isSupported ? 
kTfLiteOk : kTfLiteError; +} + TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData, TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode, int nodeIndex, int32_t softmaxOperatorCode) { - armnn::IgnoreUnused(delegateData, - tfLiteContext, - tfLiteNode, - nodeIndex, - softmaxOperatorCode); + TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + + const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; + const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; + if (IsDynamicTensor(tfLiteInputTensor)) + { + TF_LITE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ", + nodeIndex); + return kTfLiteError; + } + const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; + if (IsDynamicTensor(tfLiteOutputTensor)) + { + TF_LITE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ", + nodeIndex); + return kTfLiteError; + } + + const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); + const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); + + + if (!delegateData.m_Network) + { + switch(softmaxOperatorCode) + { + case kTfLiteBuiltinSoftmax: + { + armnn::SoftmaxDescriptor descriptor; + auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data); + descriptor.m_Beta = params->beta; + return ValidateSoftmaxOperator(delegateData, + tfLiteContext, + inputTensorInfo, + outputTensorInfo, + descriptor); + } + case kTfLiteBuiltinLogSoftmax: + { + armnn::LogSoftmaxDescriptor descriptor; + return ValidateLogSoftmaxOperator(delegateData, + tfLiteContext, + inputTensorInfo, + outputTensorInfo, + descriptor); + } + default: + return kTfLiteError; + } + } + + armnn::IConnectableLayer* softmaxLayer 
= nullptr; + + switch(softmaxOperatorCode) + { + case kTfLiteBuiltinSoftmax: + { + armnn::SoftmaxDescriptor descriptor; + auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data); + descriptor.m_Beta = params->beta; + softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor); + break; + } + case kTfLiteBuiltinLogSoftmax: + { + armnn::LogSoftmaxDescriptor descriptor; + softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor); + break; + } + default: + return kTfLiteError; + } + ARMNN_ASSERT(softmaxLayer != nullptr); + + armnn::IOutputSlot& outputSlot = softmaxLayer->GetOutputSlot(0); + outputSlot.SetTensorInfo(outputTensorInfo); - return kTfLiteError; + // Connect + return Connect(softmaxLayer, tfLiteNode, delegateData); } } // namespace armnnDelegate |