author    Teresa Charlin <teresa.charlinreyes@arm.com>    2023-03-14 12:10:28 +0000
committer Teresa Charlin <teresa.charlinreyes@arm.com>    2023-03-28 11:41:55 +0100
commit    ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
tree      a5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/common/src
parent    9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
download  armnn-ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9.tar.gz
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * tests is for code shared between the existing delegate and the opaque delegate, including test utils that work out which delegate to use
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate is built as libarmnnOpaqueDelegate.so
* The Opaque structure is introduced, but no API is added yet
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and 2 new CMakeLists.txt files have been added
* Renamed BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE
* Renamed BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
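As a rough illustration of the renamed build flags described above (the invocation and build directory are placeholders, not part of this change):

    # Hypothetical configure step: enable one or both delegates by name
    cmake .. -DBUILD_CLASSIC_DELEGATE=1 -DBUILD_OPAQUE_DELEGATE=1

With both flags set, the classic delegate builds as libarmnnDelegate.so and the opaque delegate as libarmnnOpaqueDelegate.so.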
Diffstat (limited to 'delegate/common/src')
-rw-r--r--  delegate/common/src/DelegateOptions.cpp  256
-rw-r--r--  delegate/common/src/DelegateUtils.hpp    639
2 files changed, 895 insertions, 0 deletions
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
new file mode 100644
index 0000000000..fc4858fa29
--- /dev/null
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -0,0 +1,256 @@
+//
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <DelegateOptions.hpp>
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/utility/StringUtils.hpp>
+
+#include <cstdlib> // atoi, free
+#include <cstring> // strdup, strtok
+
+namespace armnnDelegate
+{
+
+DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
+ const std::vector<armnn::BackendOptions>& backendOptions,
+ const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
+ : m_Backends({computeDevice}), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
+{
+ m_RuntimeOptions.m_BackendOptions = backendOptions;
+}
+
+DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
+ const std::vector<armnn::BackendOptions>& backendOptions,
+ const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
+ : m_Backends(backends), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
+{
+ m_RuntimeOptions.m_BackendOptions = backendOptions;
+}
+
+DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
+ const armnn::OptimizerOptions& optimizerOptions,
+ const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
+ const armnn::Optional<armnn::DebugCallbackFunction>& func)
+ : m_Backends({computeDevice}),
+ m_RuntimeOptions(),
+ m_OptimizerOptions(optimizerOptions),
+ m_LoggingSeverity(logSeverityLevel),
+ m_DebugCallbackFunc(func)
+{
+}
+
+DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
+ const armnn::OptimizerOptions& optimizerOptions,
+ const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
+ const armnn::Optional<armnn::DebugCallbackFunction>& func)
+ : m_Backends(backends),
+ m_RuntimeOptions(),
+ m_OptimizerOptions(optimizerOptions),
+ m_LoggingSeverity(logSeverityLevel),
+ m_DebugCallbackFunc(func)
+{
+}
+
+DelegateOptions::DelegateOptions(char const* const* options_keys,
+ char const* const* options_values,
+ size_t num_options,
+ void (*report_error)(const char*))
+{
+ armnn::IRuntime::CreationOptions runtimeOptions;
+ armnn::OptimizerOptions optimizerOptions;
+ bool internalProfilingState = false;
+ armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
+ for (size_t i = 0; i < num_options; ++i)
+ {
+ // Process backends
+ if (std::string(options_keys[i]) == std::string("backends"))
+ {
+ // The backend option is a comma separated string of backendIDs that needs to be split
+ std::vector<armnn::BackendId> backends;
+ char* dup = strdup(options_values[i]);
+ char* pch = std::strtok(dup, ",");
+ while (pch != nullptr)
+ {
+ backends.push_back(pch);
+ pch = std::strtok(nullptr, ",");
+ }
+ // Release the strdup'd buffer now that the tokens have been copied into backends
+ free(dup);
+ this->SetBackends(backends);
+ }
+ // Process dynamic-backends-path
+ else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
+ {
+ runtimeOptions.m_DynamicBackendsPath = std::string(options_values[i]);
+ }
+ // Process logging level
+ else if (std::string(options_keys[i]) == std::string("logging-severity"))
+ {
+ this->SetLoggingSeverity(options_values[i]);
+ }
+ // Process GPU backend options
+ else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"TuningLevel", atoi(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else if (std::string(options_keys[i]) == std::string("gpu-mlgo-tuning-file"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", std::string(options_values[i])}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
+ else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"TuningFile", std::string(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else if (std::string(options_keys[i]) == std::string("gpu-enable-profiling"))
+ {
+ runtimeOptions.m_EnableGpuProfiling = (*options_values[i] != '0');
+ }
+ else if (std::string(options_keys[i]) == std::string("gpu-kernel-profiling-enabled"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"KernelProfilingEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else if (std::string(options_keys[i]) == std::string("save-cached-network"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"SaveCachedNetwork",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
+ else if (std::string(options_keys[i]) == std::string("cached-network-filepath"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath", std::string(options_values[i])}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
+ // Process GPU & CPU backend options
+ else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
+ {
+ armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
+
+ armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
+ }
+ // Process CPU backend options
+ else if (std::string(options_keys[i]) == std::string("number-of-threads"))
+ {
+ unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
+ armnn::BackendOptions modelOption("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
+ optimizerOptions.m_ModelOptions.push_back(modelOption);
+ }
+ // Process reduce-fp32-to-fp16 option
+ else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
+ {
+ optimizerOptions.m_ReduceFp32ToFp16 = armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Process debug-data
+ else if (std::string(options_keys[i]) == std::string("debug-data"))
+ {
+ optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Infer output-shape
+ else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
+ {
+ armnn::BackendOptions backendOption("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
+ });
+ optimizerOptions.m_ModelOptions.push_back(backendOption);
+ }
+ // Allow expanded dims
+ else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
+ {
+ armnn::BackendOptions backendOption("AllowExpandedDims",
+ {
+ { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
+ });
+ optimizerOptions.m_ModelOptions.push_back(backendOption);
+ }
+ // Process memory-import
+ else if (std::string(options_keys[i]) == std::string("memory-import"))
+ {
+ optimizerOptions.m_ImportEnabled = armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Process enable-internal-profiling
+ else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
+ {
+ internalProfilingState = *options_values[i] != '0';
+ optimizerOptions.m_ProfilingEnabled = internalProfilingState;
+ }
+ // Process internal-profiling-detail
+ else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
+ {
+ uint32_t detailLevel = static_cast<uint32_t>(std::stoul(options_values[i]));
+ switch (detailLevel)
+ {
+ case 1:
+ internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
+ break;
+ case 2:
+ internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsOnly;
+ break;
+ default:
+ internalProfilingDetail = armnn::ProfilingDetailsMethod::Undefined;
+ break;
+ }
+ }
+ // Process enable-external-profiling
+ else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_EnableProfiling =
+ armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Process timeline-profiling
+ else if (std::string(options_keys[i]) == std::string("timeline-profiling"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_TimelineEnabled = armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Process outgoing-capture-file
+ else if (std::string(options_keys[i]) == std::string("outgoing-capture-file"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile = options_values[i];
+ }
+ // Process incoming-capture-file
+ else if (std::string(options_keys[i]) == std::string("incoming-capture-file"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_IncomingCaptureFile = options_values[i];
+ }
+ // Process file-only-external-profiling
+ else if (std::string(options_keys[i]) == std::string("file-only-external-profiling"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_FileOnly = armnn::stringUtils::StringToBool(options_values[i]);
+ }
+ // Process counter-capture-period
+ else if (std::string(options_keys[i]) == std::string("counter-capture-period"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_CapturePeriod = static_cast<uint32_t>(std::stoul(options_values[i]));
+ }
+ // Process profiling-file-format
+ else if (std::string(options_keys[i]) == std::string("profiling-file-format"))
+ {
+ runtimeOptions.m_ProfilingOptions.m_FileFormat = options_values[i];
+ }
+ // Process serialize-to-dot
+ else if (std::string(options_keys[i]) == std::string("serialize-to-dot"))
+ {
+ this->SetSerializeToDot(options_values[i]);
+ }
+
+ // Process disable-tflite-runtime-fallback
+ else if (std::string(options_keys[i]) == std::string("disable-tflite-runtime-fallback"))
+ {
+ this->DisableTfLiteRuntimeFallback(armnn::stringUtils::StringToBool(options_values[i]));
+ }
+ else
+ {
+ throw armnn::Exception("Unknown option for the ArmNN Delegate given: " + std::string(options_keys[i]));
+ }
+ }
+
+ this->SetRuntimeOptions(runtimeOptions);
+ this->SetOptimizerOptions(optimizerOptions);
+ this->SetInternalProfilingParams(internalProfilingState, internalProfilingDetail);
+}
+} // namespace armnnDelegate
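A minimal sketch of how the key/value constructor above might be exercised; the option values are illustrative only, and the error-reporting callback is left null since nothing in this constructor uses it:

    // Hypothetical caller building DelegateOptions from string options
    const char* optionKeys[]   = { "backends", "number-of-threads", "reduce-fp32-to-fp16" };
    const char* optionValues[] = { "CpuAcc,GpuAcc", "4", "1" };
    armnnDelegate::DelegateOptions delegateOptions(optionKeys, optionValues, 3, nullptr);

Any key outside the set handled above throws an armnn::Exception, so callers should check their option names against that list.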
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
new file mode 100644
index 0000000000..1aa9029271
--- /dev/null
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -0,0 +1,639 @@
+//
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+namespace
+{
+
+// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
+try \
+{ \
+ for (auto&& backendId : backends) \
+ { \
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+ if (layerSupportObject.IsBackendRegistered()) \
+ { \
+ std::string reasonIfUnsupported; \
+ supported = \
+ layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ if (supported) \
+ { \
+ setBackend = backendId; \
+ break; \
+ } \
+ else \
+ { \
+ if (reasonIfUnsupported.size() > 0) \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
+ } \
+ else \
+ { \
+ TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
+ "%s: not supported by armnn", opName); \
+ } \
+ } \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
+ } \
+ } \
+ if (!supported) \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
+ } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+ throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
+
+TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (static_cast<unsigned int >(numInputs) != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
+ numInputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numOutputs = tfLiteNode->outputs->size;
+ if (static_cast<unsigned int >(numOutputs) != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
+ numOutputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
+{
+ auto tensorAllocationType = tfLiteTensor.allocation_type;
+ if (tensorAllocationType == kTfLiteDynamic)
+ {
+ return true;
+ }
+ return false;
+}
+
+bool IsValid(const TfLiteTensor* tfLiteTensor)
+{
+ return tfLiteTensor != nullptr;
+}
+
+bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
+{
+ if(!IsValid(&tfLiteTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ if (IsDynamicTensor(tfLiteTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return false;
+ }
+ return true;
+}
+
+uint32_t NonNegative(int32_t value, int nodeIndex)
+{
+ if (value < 0)
+ {
+ throw armnn::Exception(
+ "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
+ }
+ else
+ {
+ return static_cast<uint32_t>(value);
+ }
+}
+
+bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
+{
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ return true;
+ }
+ return false;
+}
+
+TfLiteStatus Connect(armnn::IConnectableLayer* layer,
+ TfLiteNode* tfLiteNode,
+ armnnDelegate::DelegateData& data)
+{
+ ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
+
+ // Connect the input slots
+ for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+ {
+ if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
+ {
+ data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
+ }
+ }
+
+ // Prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+
+ return kTfLiteOk;
+}
+
+void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
+ armnn::TensorInfo& inputInfo1)
+{
+ unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
+ unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
+
+ if (inputDimensions0 == inputDimensions1)
+ {
+ return;
+ }
+
+ unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
+
+ bool input0IsSmaller = inputDimensions0 < inputDimensions1;
+ armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
+ const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
+
+ smallInfo.SetShape(newShape);
+
+}
+
+TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ TfLiteFusedActivation activationType,
+ armnn::IConnectableLayer* prevLayer,
+ unsigned int outputSlotIndex,
+ armnnDelegate::DelegateData& data)
+{
+
+ const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
+
+ armnn::ActivationDescriptor activationDesc;
+
+ switch (activationType)
+ {
+ case kTfLiteActNone:
+ {
+ // No Activation
+ return kTfLiteOk;
+ }
+ case kTfLiteActRelu:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+ break;
+ }
+// The name of kTfLiteActRelu1 changed after TF Lite v2.3
+#if defined(ARMNN_POST_TFLITE_2_3)
+ case kTfLiteActReluN1To1:
+#else
+ case kTfLiteActRelu1:
+#endif
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = -1.0f;
+ break;
+ }
+ case kTfLiteActRelu6:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+ activationDesc.m_A = 6.0f;
+ activationDesc.m_B = 0.0f;
+ break;
+ }
+ case kTfLiteActSigmoid:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+ break;
+ }
+ case kTfLiteActTanh:
+ {
+ activationDesc.m_Function = armnn::ActivationFunction::TanH;
+ activationDesc.m_A = 1.0f;
+ activationDesc.m_B = 1.0f;
+ break;
+ }
+ default:
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
+ tfLiteContext,
+ IsActivationSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ activationOutputInfo,
+ activationOutputInfo,
+ activationDesc);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
+
+ ARMNN_ASSERT(activationLayer != nullptr);
+ activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
+ armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ armnn::IConnectableLayer* prevLayer,
+ armnn::TensorInfo reshapedOutputTensorInfo,
+ armnn::TensorInfo outputTensorInfo,
+ armnnDelegate::DelegateData& data)
+{
+ armnn::ReshapeDescriptor desc;
+ desc.m_TargetShape = outputTensorInfo.GetShape();
+
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
+ tfLiteContext,
+ IsReshapeSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ reshapedOutputTensorInfo,
+ outputTensorInfo,
+ desc);
+
+ if (!isSupported)
+ {
+ return nullptr;
+ }
+
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ reshapeLayer->SetBackendId(setBackend);
+ ARMNN_ASSERT(reshapeLayer != nullptr);
+
+ prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
+ reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Connect and prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
+ {
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
+ armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[static_cast<unsigned long>(
+ tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
+ }
+ return reshapeLayer;
+}
+
+armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
+{
+ switch (tfLiteTensor.type)
+ {
+ case kTfLiteBool:
+ return armnn::DataType::Boolean;
+ case kTfLiteFloat32:
+ return armnn::DataType::Float32;
+ case kTfLiteFloat16:
+ return armnn::DataType::Float16;
+ case kTfLiteUInt8:
+ return armnn::DataType::QAsymmU8;
+ case kTfLiteInt8:
+ {
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ auto* quantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ else
+ {
+ return armnn::DataType::QSymmS8;
+ }
+ }
+ else
+ {
+ return armnn::DataType::QAsymmS8;
+ }
+ }
+ case kTfLiteInt16:
+ return armnn::DataType::QSymmS16;
+ case kTfLiteInt32:
+ return armnn::DataType::Signed32;
+ case kTfLiteInt64:
+ return armnn::DataType::Signed64;
+ default:
+ throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " + std::to_string(tfLiteTensor.type));
+ }
+}
+
+armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
+{
+ armnn::DataType type = GetDataType(tfLiteTensor);
+ armnn::TensorInfo ret;
+ auto tensorDimensionSize = tfLiteTensor.dims->size;
+ if (tensorDimensionSize == 0)
+ {
+ // If the input tensor does not have a shape,
+ // assume that it is a 1D tensor
+ if (!isOutput)
+ {
+ std::vector<unsigned int> safeShape = { 1 };
+ bool dimensionsSpecificity[1] = { true };
+ armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
+ safeShape.data(),
+ dimensionsSpecificity);
+ ret = armnn::TensorInfo(tensorShape, type);
+ if(tflite::IsConstantTensor(&tfLiteTensor))
+ {
+ ret.SetConstant(true);
+ }
+ }
+ else
+ {
+ armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+ else
+ {
+ std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
+ bool dimensionsSpecificity[5] = { true, true, true, true, true };
+ for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
+ auto dim = tfLiteTensor.dims->data[i];
+ if (dim == 0)
+ {
+ dimensionsSpecificity[i] = false;
+ }
+ tensorDims[i] = static_cast<unsigned int>(dim);
+ }
+ armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
+ tensorDims.data(),
+ dimensionsSpecificity);
+
+ if(tflite::IsConstantTensor(&tfLiteTensor))
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ ret.SetConstant(true);
+ }
+ else
+ {
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ }
+
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ // get per-channel quantization parameters
+ const auto* affineQuantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ if (affineQuantization->scale->size > 1)
+ {
+ std::vector<float> quantizationScales;
+ for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
+ {
+ quantizationScales.push_back(affineQuantization->scale->data[i]);
+ }
+ ret.SetQuantizationScales(quantizationScales);
+ ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
+ }
+ else
+ {
+ ret.SetQuantizationScale(affineQuantization->scale->data[0]);
+ ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
+ }
+ }
+ else
+ {
+ auto quantizationParameters = tfLiteTensor.params;
+ ret.SetQuantizationScale(quantizationParameters.scale);
+ ret.SetQuantizationOffset(quantizationParameters.zero_point);
+ }
+
+ return ret;
+}
+
+armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
+ const armnn::TensorInfo& tensorInfo)
+{
+ if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
+ {
+ throw armnn::Exception(
+ "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
+ }
+
+ return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
+}
+
+armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
+{
+ const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
+ armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
+ return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
+}
+
+void CalcPadding(uint32_t inputSize,
+ uint32_t filterSize,
+ uint32_t stride,
+ uint32_t dilation,
+ uint32_t& paddingFront,
+ uint32_t& paddingBack,
+ TfLitePadding padding)
+{
+ paddingFront = 0;
+ paddingBack = 0;
+ if (padding == kTfLitePaddingSame)
+ {
+ uint32_t outputSize = (inputSize + stride - 1) / stride;
+ uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+ uint32_t temp = (outputSize - 1) * stride + dilatedSize;
+ if (temp > inputSize)
+ {
+ paddingFront = (temp - inputSize) / 2;
+ paddingBack = (temp - inputSize) - paddingFront;
+ }
+ }
+}
+
+TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
+ const armnn::TensorInfo& constTensorInfo,
+ TfLiteContext* tfLiteContext,
+ const TfLiteTensor& tfLiteTensor,
+ armnnDelegate::DelegateData& data,
+ unsigned int slotIndex)
+{
+ IgnoreUnused(layer);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
+ tfLiteContext,
+ IsConstantSupported,
+ data.m_Backends,
+ isSupported,
+ setBackend,
+ constTensorInfo);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+
+ auto constantInput = CreateConstTensor(&tfLiteTensor,
+ constTensorInfo);
+ armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
+ armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(constTensorInfo);
+
+ data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;
+
+ return kTfLiteOk;
+}
+
+bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
+{
+ // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
+ // less then the input is not present.
+ if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
+ {
+ return true;
+ }
+ return false;
+}
+
+TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
+ armnnDelegate::DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode)
+{
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ // Process input tensors
+ // If input tensor is a Constant tensor create a constant layer and connect it to the network
+ for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+ {
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
+ if (tflite::IsConstantTensor(&tfLiteInputTensor))
+ {
+ armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
+ tfLiteContext,
+ IsConstantSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo);
+ if (!isSupported)
+ {
+ return kTfLiteError;
+ }
+ auto constantInput = CreateConstTensor(&tfLiteInputTensor,
+ inputTensorInfo);
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
+ armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(inputTensorInfo);
+
+ delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
+ }
+ }
+ return kTfLiteOk;
+}
+
+unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
+{
+ int numDims = armnn::numeric_cast<int>(numDimensions);
+ int wrappedIndex = index < 0 ? numDims + index : index;
+ ARMNN_ASSERT(wrappedIndex >= 0);
+ ARMNN_ASSERT(wrappedIndex < numDims);
+
+ return static_cast<unsigned int>(wrappedIndex);
+}
+
+bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
+ const armnn::TensorInfo& inputInfo2,
+ const armnn::TensorInfo& outputInfo)
+{
+ return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
+ (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
+ (armnn::DataType::Signed32 == outputInfo.GetDataType());
+}
+
+void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
+{
+ // If input tensor info is constant and output tensor info shape is not specified
+ // set the output shape from input shape
+ if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+ {
+ outputInfo.SetShape(inputInfo.GetShape());
+ }
+ return;
+}
+
+} // namespace anonymous
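A self-contained sketch of the SAME-padding arithmetic used by CalcPadding above, with illustrative sizes; CalcPadding itself sits in an anonymous namespace, so the calculation is repeated here rather than called:

    #include <cstdio>

    int main()
    {
        // Example: input size 7, filter size 3, stride 2, no dilation
        unsigned int inputSize = 7, filterSize = 3, stride = 2, dilation = 1;
        unsigned int outputSize   = (inputSize + stride - 1) / stride;              // 4
        unsigned int dilatedSize  = filterSize + (dilation - 1) * (filterSize - 1); // 3
        unsigned int temp         = (outputSize - 1) * stride + dilatedSize;        // 9
        unsigned int paddingFront = 0, paddingBack = 0;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;            // 1
            paddingBack  = (temp - inputSize) - paddingFront; // 1
        }
        std::printf("SAME padding: front=%u back=%u\n", paddingFront, paddingBack);
        return 0;
    }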