diff options
author | Matthew Sloyan <matthew.sloyan@arm.com> | 2020-11-26 10:54:22 +0000 |
---|---|---|
committer | Matthew Sloyan <matthew.sloyan@arm.com> | 2020-12-07 09:41:42 +0000 |
commit | c8eb955a2c9f0b432fe932e2df8445f242080e31 (patch) | |
tree | cb3a74ae4d3a2a558f0589a45a6d1ea6d58e02c3 /delegate/src/LogicalBinary.hpp | |
parent | 97451b4429b717f6ff19c10716d1d82a2ff6f155 (diff) | |
download | armnn-c8eb955a2c9f0b432fe932e2df8445f242080e31.tar.gz |
IVGCVSW-5381 TfLiteDelegate: Implement the Logical operators
* Implemented Logical AND, NOT and OR operators.
* NOT uses existing ElementwiseUnary VisitLayer function & tests.
* AND/OR uses new LogicalBinary VisitLayer function & tests.
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I5e7f1e78b30c36ac7f14c70a712b54f98d664b83
Diffstat (limited to 'delegate/src/LogicalBinary.hpp')
-rw-r--r-- | delegate/src/LogicalBinary.hpp | 122 |
1 file changed, 122 insertions, 0 deletions
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp new file mode 100644 index 0000000000..07b55c3e32 --- /dev/null +++ b/delegate/src/LogicalBinary.hpp @@ -0,0 +1,122 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <tensorflow/lite/builtin_ops.h> +#include <tensorflow/lite/c/builtin_op_data.h> +#include <tensorflow/lite/c/common.h> +#include <tensorflow/lite/minimal_logging.h> + +namespace armnnDelegate +{ + +TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData, + TfLiteContext* tfLiteContext, + TfLiteNode* tfLiteNode, + int nodeIndex, + int32_t logicalOperatorCode, + armnn::LogicalBinaryOperation binaryOperation) +{ + TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex)); + TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + + const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; + const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteInputTensor0, logicalOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]]; + if (!IsValid(tfLiteContext, tfLiteInputTensor1, logicalOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteOutputTensor, logicalOperatorCode, nodeIndex)) + { + return kTfLiteError; + } + + armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0); + armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1); + const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); + + // Setup descriptor and assign operation + armnn::LogicalBinaryDescriptor desc; + desc.m_Operation = binaryOperation; + 
+ // Check if supported + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + tfLiteContext, + IsLogicalBinarySupported, + delegateData.m_Backends, + isSupported, + inputTensorInfo0, + inputTensorInfo1, + outputTensorInfo, + desc); + }; + + if (!delegateData.m_Network) + { + validateFunc(outputTensorInfo, isSupported); + return isSupported ? kTfLiteOk : kTfLiteError; + } + + armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc); + ARMNN_ASSERT(logicalBinaryLayer != nullptr); + + armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0); + outputSlot.SetTensorInfo(outputTensorInfo); + + if(tflite::IsConstantTensor(&tfLiteInputTensor0)) + { + auto status = ConnectConstant(logicalBinaryLayer, + inputTensorInfo0, + tfLiteContext, + tfLiteInputTensor0, + delegateData, + tfLiteNode->inputs->data[0]); + if (status == kTfLiteError) + { + return status; + } + } + + if(tflite::IsConstantTensor(&tfLiteInputTensor1)) + { + auto status = ConnectConstant(logicalBinaryLayer, + inputTensorInfo1, + tfLiteContext, + tfLiteInputTensor1, + delegateData, + tfLiteNode->inputs->data[1]); + if (status == kTfLiteError) + { + return status; + } + } + + // LogicalBinary operators support broadcasting + auto reshapeLayer = BroadcastTensor(inputTensorInfo0, + inputTensorInfo1, + logicalBinaryLayer, + tfLiteContext, + tfLiteNode, + delegateData); + if (!reshapeLayer) + { + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace armnnDelegate |