author     Sadik Armagan <sadik.armagan@arm.com>  2020-10-23 17:14:43 +0100
committer  Jim Flynn <jim.flynn@arm.com>          2020-10-27 13:51:58 +0000
commit     62483bee640e7d8accf6ac77b24c6e9828841851 (patch)
tree       ba7025bc86819c3d787428dd16b5be73b90a4353 /delegate/src/DelegateUtils.hpp
parent     3d1323ff87fa92ff9cfc74097148b97fa1784416 (diff)
download   armnn-62483bee640e7d8accf6ac77b24c6e9828841851.tar.gz
IVGCVSW-5366 'Add a do nothing SubGraph class'
IVGCVSW-5373 'Implement the ABS operator in the Delegate'

* Added a switch statement into the VisitNode() function
* Separated the Visit functions into categorized source files
* Implemented the VisitElementwiseUnary() function
* Added tests for ABS and SQRT

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: If9654d0a8d8ff7dcd6fb5cbe0dc312941772affb
Diffstat (limited to 'delegate/src/DelegateUtils.hpp')
-rw-r--r--  delegate/src/DelegateUtils.hpp  |  205
1 file changed, 205 insertions(+), 0 deletions(-)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
new file mode 100644
index 0000000000..16dc8a81d4
--- /dev/null
+++ b/delegate/src/DelegateUtils.hpp
@@ -0,0 +1,205 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <string>
+#include <vector>
+
+namespace
+{
+
+// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
+try \
+{ \
+ for (auto&& backendId : backends) \
+ { \
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+ if (layerSupportObject) \
+ { \
+ std::string reasonIfUnsupported; \
+ supported = \
+ layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ if (supported) \
+ { \
+ break; \
+ } \
+ else \
+ { \
+            if (!reasonIfUnsupported.empty()) \
+ { \
+ TF_LITE_KERNEL_LOG( \
+ tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
+ } \
+ } \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
+ } \
+ } \
+ if (!supported) \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
+ } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+ throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
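+
+// Illustrative usage sketch; it assumes IsElementwiseUnarySupported as the queried
+// function and that inputTensorInfo, outputTensorInfo and descriptor are already
+// populated, with delegateData.m_Backends holding the preferred backends:
+//
+//     bool isSupported = false;
+//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
+//                                tfLiteContext,
+//                                IsElementwiseUnarySupported,
+//                                delegateData.m_Backends,
+//                                isSupported,
+//                                inputTensorInfo,
+//                                outputTensorInfo,
+//                                descriptor);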
+
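+// Checks that the node has the expected number of input tensors and logs a
+// kernel error if it does not.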
+TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+    auto numInputs = tfLiteNode->inputs->size;
+    if (static_cast<unsigned int>(numInputs) != expectedSize)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %u) in node #%d",
+            numInputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
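+// Checks that the node has the expected number of output tensors and logs a
+// kernel error if it does not.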
+TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+    auto numOutputs = tfLiteNode->outputs->size;
+    if (static_cast<unsigned int>(numOutputs) != expectedSize)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %u) in node #%d",
+            numOutputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
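+// Returns true if the TfLite tensor uses dynamic (evaluation-time) allocation,
+// i.e. its contents are only known once the kernel runs.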
+bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
+{
+    return tfLiteTensor.allocation_type == kTfLiteDynamic;
+}
+
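+// Builds an armnn::TensorInfo from a TfLiteTensor: maps the data type, copies the
+// shape (a dimension of 0 is treated as unspecified) and carries over either
+// per-channel or per-tensor quantization parameters.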
+armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
+{
+ armnn::DataType type;
+ switch (tfLiteTensor.type)
+ {
+ case kTfLiteBool:
+ type = armnn::DataType::Boolean;
+ break;
+ case kTfLiteFloat32:
+ type = armnn::DataType::Float32;
+ break;
+ case kTfLiteFloat16:
+ type = armnn::DataType::Float16;
+ break;
+ case kTfLiteUInt8:
+ type = armnn::DataType::QAsymmU8;
+ break;
+ case kTfLiteInt8:
+ type = armnn::DataType::QSymmS8;
+ break;
+ case kTfLiteInt16:
+ type = armnn::DataType::QSymmS16;
+ break;
+ case kTfLiteInt32:
+ type = armnn::DataType::Signed32;
+ break;
+        default:
+            // concatenating the enum directly to a string literal would be pointer
+            // arithmetic, so convert the type to text explicitly
+            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
+                                   std::to_string(tfLiteTensor.type));
+ }
+
+ armnn::TensorInfo ret;
+ auto tensorDimensionSize = tfLiteTensor.dims->size;
+ if (tensorDimensionSize == 0)
+ {
+ armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ else
+ {
+        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
+        // armnn::TensorShape supports at most 5 dimensions; a TfLite dimension of 0
+        // marks that dimension as not specified
+        bool dimensionsSpecificity[5] = { true, true, true, true, true };
+        for (int i = 0; i < tensorDimensionSize; ++i)
+        {
+            auto dim = tfLiteTensor.dims->data[i];
+            if (dim == 0)
+            {
+                dimensionsSpecificity[i] = false;
+            }
+            tensorDims[i] = static_cast<unsigned int>(dim);
+        }
+        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
+                                       tensorDims.data(), dimensionsSpecificity);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ // get per-channel quantization parameters
+ const auto* affineQuantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ std::vector<float> quantizationScales;
+        // copy every per-channel scale; starting this loop at index 1 would silently
+        // drop the scale of the first channel
+        for (int i = 0; i < affineQuantization->scale->size; ++i)
+ {
+ quantizationScales.push_back(affineQuantization->scale->data[i]);
+ }
+ ret.SetQuantizationScales(quantizationScales);
+ ret.SetQuantizationDim(armnn::MakeOptional<unsigned int>(affineQuantization->quantized_dimension));
+ }
+ else
+ {
+ auto quantizationParameters = tfLiteTensor.params;
+ ret.SetQuantizationScale(quantizationParameters.scale);
+ ret.SetQuantizationOffset(quantizationParameters.zero_point);
+ }
+
+ return ret;
+}
+
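+// Wires the node's input tensors into the layer's input slots and records the
+// layer's output slots so that subsequent nodes can connect to them.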
+TfLiteStatus Connect(armnn::IConnectableLayer& layer,
+ TfLiteNode* tfLiteNode,
+ armnnDelegate::DelegateData& data)
+{
+    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->inputs->size) == layer.GetNumInputSlots());
+    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer.GetNumOutputSlots());
+
+ // connect the input slots
+ for (unsigned int inputIndex = 0; inputIndex < layer.GetNumInputSlots(); ++inputIndex)
+ {
+ data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer.GetInputSlot(inputIndex));
+ }
+
+ // prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer.GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+} // namespace anonymous
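
Taken together, these helpers give a delegate visit function its usual shape: validate
the node, translate the tensors, query backend support, then add and wire the layer.
A minimal sketch follows, assuming a hypothetical VisitAbsOperator and that
armnnDelegate::DelegateData exposes m_Network alongside m_Backends and
m_OutputSlotForNode:

    TfLiteStatus VisitAbsOperator(armnnDelegate::DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex)
    {
        // Reject nodes with an unexpected signature
        if (ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex) != kTfLiteOk ||
            ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex) != kTfLiteOk)
        {
            return kTfLiteError;
        }

        // Translate the TfLite tensors into armnn tensor infos
        const TfLiteTensor& tfLiteInputTensor  = tfLiteContext->tensors[tfLiteNode->inputs->data[0]];
        const TfLiteTensor& tfLiteOutputTensor = tfLiteContext->tensors[tfLiteNode->outputs->data[0]];
        const armnn::TensorInfo inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
        const armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

        armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);

        // Ask each configured backend whether it supports the layer
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__, tfLiteContext, IsElementwiseUnarySupported,
                                   delegateData.m_Backends, isSupported,
                                   inputTensorInfo, outputTensorInfo, descriptor);
        if (!isSupported)
        {
            return kTfLiteError;
        }

        // Add the layer to the network being built and wire up its slots
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
        return Connect(*layer, tfLiteNode, delegateData);
    }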