aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2020-10-23 17:14:43 +0100
committerJim Flynn <jim.flynn@arm.com>2020-10-27 13:51:58 +0000
commit62483bee640e7d8accf6ac77b24c6e9828841851 (patch)
treeba7025bc86819c3d787428dd16b5be73b90a4353
parent3d1323ff87fa92ff9cfc74097148b97fa1784416 (diff)
downloadarmnn-62483bee640e7d8accf6ac77b24c6e9828841851.tar.gz
IVGCVSW-5366 'Add a do nothing SubGraph class'
IVGCVSW-5373 'Implement the ABS operator in the Delegate' * Added a Switch statement into the VisitNode() function * Separated the Visit functions into the categorized source files * Implemented VisitElementwiseUnary() function * Added tests for ABS and SQRT Signed-off-by: Sadik Armagan <sadik.armagan@arm.com> Change-Id: If9654d0a8d8ff7dcd6fb5cbe0dc312941772affb
-rw-r--r--delegate/CMakeLists.txt39
-rw-r--r--delegate/include/armnn_delegate.hpp135
-rw-r--r--delegate/src/Activation.hpp25
-rw-r--r--delegate/src/ArgMinMax.hpp25
-rw-r--r--delegate/src/BatchSpace.hpp34
-rw-r--r--delegate/src/Comparison.hpp25
-rw-r--r--delegate/src/Control.hpp25
-rw-r--r--delegate/src/Convolution.hpp25
-rw-r--r--delegate/src/DelegateUtils.hpp205
-rw-r--r--delegate/src/ElementwiseBinary.hpp25
-rw-r--r--delegate/src/ElementwiseUnary.hpp83
-rw-r--r--delegate/src/Fill.hpp25
-rw-r--r--delegate/src/FullyConnected.hpp25
-rw-r--r--delegate/src/Gather.hpp25
-rw-r--r--delegate/src/Lstm.hpp25
-rw-r--r--delegate/src/Normalization.hpp25
-rw-r--r--delegate/src/Pad.hpp25
-rw-r--r--delegate/src/Pooling.hpp25
-rw-r--r--delegate/src/Quantization.hpp34
-rw-r--r--delegate/src/Redefine.hpp43
-rw-r--r--delegate/src/Resize.hpp25
-rw-r--r--delegate/src/Round.hpp25
-rw-r--r--delegate/src/Slice.hpp25
-rw-r--r--delegate/src/Softmax.hpp25
-rw-r--r--delegate/src/SpaceDepth.hpp34
-rw-r--r--delegate/src/Transpose.hpp25
-rw-r--r--delegate/src/armnn_delegate.cpp664
-rw-r--r--delegate/src/test/AbsTest.cpp98
-rw-r--r--delegate/src/test/ArmnnDelegateTest.cpp12
-rw-r--r--delegate/src/test/SqrtTest.cpp97
30 files changed, 1789 insertions, 139 deletions
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index be18052548..aba27dfdaa 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -15,7 +15,31 @@ list(APPEND armnnDelegate_sources
include/armnn_delegate.hpp
include/DelegateOptions.hpp
src/armnn_delegate.cpp
- src/DelegateOptions.cpp)
+ src/DelegateOptions.cpp
+ src/Activation.hpp
+ src/ArgMinMax.hpp
+ src/BatchSpace.hpp
+ src/Comparison.hpp
+ src/Convolution.hpp
+ src/Control.hpp
+ src/DelegateUtils.hpp
+ src/ElementwiseBinary.hpp
+ src/ElementwiseUnary.hpp
+ src/Fill.hpp
+ src/FullyConnected.hpp
+ src/Gather.hpp
+ src/Lstm.hpp
+ src/Normalization.hpp
+ src/Pad.hpp
+ src/Pooling.hpp
+ src/Quantization.hpp
+ src/Redefine.hpp
+ src/Resize.hpp
+ src/Round.hpp
+ src/Slice.hpp
+ src/Softmax.hpp
+ src/SpaceDepth.hpp
+ src/Transpose.hpp)
add_library(armnnDelegate SHARED ${armnnDelegate_sources})
@@ -52,10 +76,6 @@ target_include_directories(armnnDelegate
PRIVATE
${TfLite_INCLUDE_DIR})
-target_include_directories(armnnDelegate
- SYSTEM PRIVATE
- "${TfLite_Schema_INCLUDE_PATH}")
-
## Add Flatbuffers dependency
find_package(Flatbuffers REQUIRED MODULE)
@@ -68,7 +88,10 @@ target_include_directories(armnnDelegate
set(armnnDelegate_unittest_sources)
list(APPEND armnnDelegate_unittest_sources
- src/test/ArmnnDelegateTest.cpp)
+ src/test/AbsTest.cpp
+ src/test/ArmnnDelegateTest.cpp
+ src/test/ElementwiseUnaryTestHelper.hpp
+ src/test/SqrtTest.cpp)
add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
target_include_directories(DelegateUnitTests PRIVATE src)
@@ -81,8 +104,8 @@ target_include_directories(DelegateUnitTests
${TfLite_INCLUDE_DIR})
target_include_directories(DelegateUnitTests
- SYSTEM PRIVATE
- "${TfLite_Schema_INCLUDE_PATH}")
+ PRIVATE
+ ${Flatbuffers_INCLUDE_DIR})
####################################################
## Export targets
diff --git a/delegate/include/armnn_delegate.hpp b/delegate/include/armnn_delegate.hpp
index 6136f2bebe..6f18185d7b 100644
--- a/delegate/include/armnn_delegate.hpp
+++ b/delegate/include/armnn_delegate.hpp
@@ -3,7 +3,8 @@
// SPDX-License-Identifier: MIT
//
-#pragma once
+#ifndef ARMNN_TFLITE_DELEGATE
+#define ARMNN_TFLITE_DELEGATE
#include "DelegateOptions.hpp"
@@ -15,32 +16,51 @@
namespace armnnDelegate
{
-TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate);
+struct DelegateData
+{
+ DelegateData(const std::vector<armnn::BackendId>& backends)
+ : m_Backends(backends)
+ , m_Network(nullptr, nullptr)
+ {}
+
+ const std::vector<armnn::BackendId> m_Backends;
+ armnn::INetworkPtr m_Network;
+ std::vector<armnn::IOutputSlot*> m_OutputSlotForNode;
+};
+
+// Forward decleration for functions initializing the ArmNN Delegate
+DelegateOptions TfLiteArmnnDelegateOptionsDefault();
+
+TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options);
+
+void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate);
-/// Delegate class
+TfLiteStatus DoPrepare(TfLiteContext* context, TfLiteDelegate* delegate);
+
+/// ArmNN Delegate
class Delegate
{
friend class ArmnnSubgraph;
public:
explicit Delegate(armnnDelegate::DelegateOptions options);
- TfLiteIntArray* CollectOperatorsToDelegate(TfLiteContext* context);
+ TfLiteIntArray* IdentifyOperatorsToDelegate(TfLiteContext* context);
TfLiteDelegate* GetDelegate();
private:
TfLiteDelegate m_Delegate = {
reinterpret_cast<void*>(this), // .data_
- DelegatePrepare, // .Prepare
+ DoPrepare, // .Prepare
nullptr, // .CopyFromBufferHandle
nullptr, // .CopyToBufferHandle
nullptr, // .FreeBufferHandle
kTfLiteDelegateFlagsNone, // .flags
};
- /// Arm NN Runtime pointer
+ /// ArmNN Runtime pointer
armnn::IRuntimePtr m_Runtime;
- /// Arm NN Delegate Options
+ /// ArmNN Delegate Options
armnnDelegate::DelegateOptions m_Options;
};
@@ -54,102 +74,45 @@ public:
TfLiteStatus Prepare(TfLiteContext* tfLiteContext);
- TfLiteStatus Invoke(TfLiteContext* tfLiteContext);
+ TfLiteStatus Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode);
- static TfLiteStatus VisitNode(armnn::INetworkPtr& network,
+ static TfLiteStatus VisitNode(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteRegistration* tfLiteRegistration,
TfLiteNode* tfLiteNode,
int nodeIndex);
private:
- ArmnnSubgraph(armnn::NetworkId networkId, armnn::IRuntime* runtime)
- : m_NetworkId(networkId), m_Runtime(runtime)
+ ArmnnSubgraph(armnn::NetworkId networkId,
+ armnn::IRuntime* runtime,
+ std::vector<armnn::BindingPointInfo>& inputBindings,
+ std::vector<armnn::BindingPointInfo>& outputBindings)
+ : m_NetworkId(networkId), m_Runtime(runtime), m_InputBindings(inputBindings), m_OutputBindings(outputBindings)
{}
+ static TfLiteStatus AddInputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* inputs,
+ std::vector<armnn::BindingPointInfo>& inputBindings);
+
+ static TfLiteStatus AddOutputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* outputs,
+ std::vector<armnn::BindingPointInfo>& outputBindings);
+
+
/// The Network Id
armnn::NetworkId m_NetworkId;
/// ArmNN Rumtime
armnn::IRuntime* m_Runtime;
-};
-
-void* ArmnnSubgraphInit(TfLiteContext* tfLiteContext, const char* buffer, size_t length)
-{
- const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
- return static_cast<void*>(ArmnnSubgraph::Create(
- tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
-}
+ // Binding information for inputs and outputs
+ std::vector<armnn::BindingPointInfo> m_InputBindings;
+ std::vector<armnn::BindingPointInfo> m_OutputBindings;
-TfLiteStatus ArmnnSubgraphPrepare(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
-{
- if (tfLiteNode->user_data == nullptr)
- {
- return kTfLiteError;
- }
-
- return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
-}
-
-TfLiteStatus ArmnnSubgraphInvoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
-{
- if (tfLiteNode->user_data == nullptr)
- {
- return kTfLiteError;
- }
-
- return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext);
-}
-
-void ArmnnSubgraphFree(TfLiteContext* tfLiteContext, void* buffer)
-{
- if (buffer != nullptr)
- {
- delete static_cast<ArmnnSubgraph*>(buffer);
- }
-}
-
-const TfLiteRegistration armnnSubgraphRegistration = {
- ArmnnSubgraphInit, // .init
- ArmnnSubgraphFree, // .free
- ArmnnSubgraphPrepare, // .prepare
- ArmnnSubgraphInvoke, // .invoke
- nullptr, // .profiling_string
- 0, // .builtin_code
- "TfLiteArmnnDelegate", // .custom_name
- 1, // .version
};
-TfLiteStatus DelegatePrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
-{
- TfLiteIntArray* supportedOperators =
- static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->CollectOperatorsToDelegate(tfLiteContext);
-
- const TfLiteStatus status =
- tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
- tfLiteContext, armnnSubgraphRegistration, supportedOperators, tfLiteDelegate);
- TfLiteIntArrayFree(supportedOperators);
-
- return status;
-}
-
} // armnnDelegate namespace
-armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault() {
- armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
- return options;
-}
+#endif // ARMNN_TFLITE_DELEGATE
-TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
-{
- auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
- return armnnDelegate->GetDelegate();
-}
-
-void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
-{
- if (tfLiteDelegate != nullptr)
- {
- delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
- }
-} \ No newline at end of file
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
new file mode 100644
index 0000000000..bd80849295
--- /dev/null
+++ b/delegate/src/Activation.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t comparisonOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
new file mode 100644
index 0000000000..4d454e10bb
--- /dev/null
+++ b/delegate/src/ArgMinMax.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t argMinMaxOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
new file mode 100644
index 0000000000..3479ddf30b
--- /dev/null
+++ b/delegate/src/BatchSpace.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
new file mode 100644
index 0000000000..19d8de10e1
--- /dev/null
+++ b/delegate/src/Comparison.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t comparisonOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
new file mode 100644
index 0000000000..53e5f1b3ef
--- /dev/null
+++ b/delegate/src/Control.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitControlOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t controlOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
new file mode 100644
index 0000000000..f16c9638c3
--- /dev/null
+++ b/delegate/src/Convolution.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
new file mode 100644
index 0000000000..16dc8a81d4
--- /dev/null
+++ b/delegate/src/DelegateUtils.hpp
@@ -0,0 +1,205 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace
+{
+
+// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
+try \
+{ \
+ for (auto&& backendId : backends) \
+ { \
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+ if (layerSupportObject) \
+ { \
+ std::string reasonIfUnsupported; \
+ supported = \
+ layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ if (supported) \
+ { \
+ break; \
+ } \
+ else \
+ { \
+ if (reasonIfUnsupported.size() > 0) \
+ { \
+ TF_LITE_KERNEL_LOG( \
+ tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
+ } \
+ } \
+ } \
+ else \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
+ } \
+ } \
+ if (!supported) \
+ { \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
+ } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+ throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
+
+TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numInputs = tfLiteNode->inputs->size;
+ if (numInputs != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
+ numInputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ const unsigned int expectedSize,
+ int nodeIndex)
+{
+ auto numOutputs = tfLiteNode->outputs->size;
+ if (numOutputs != expectedSize)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
+ numOutputs, expectedSize, nodeIndex);
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
+{
+ auto tensorAllocationType = tfLiteTensor.allocation_type;
+ if (tensorAllocationType == kTfLiteDynamic)
+ {
+ return true;
+ }
+ return false;
+}
+
+armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
+{
+ armnn::DataType type;
+ switch (tfLiteTensor.type)
+ {
+ case kTfLiteBool:
+ type = armnn::DataType::Boolean;
+ break;
+ case kTfLiteFloat32:
+ type = armnn::DataType::Float32;
+ break;
+ case kTfLiteFloat16:
+ type = armnn::DataType::Float16;
+ break;
+ case kTfLiteUInt8:
+ type = armnn::DataType::QAsymmU8;
+ break;
+ case kTfLiteInt8:
+ type = armnn::DataType::QSymmS8;
+ break;
+ case kTfLiteInt16:
+ type = armnn::DataType::QSymmS16;
+ break;
+ case kTfLiteInt32:
+ type = armnn::DataType::Signed32;
+ break;
+ default:
+ throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " + tfLiteTensor.type);
+ }
+
+ armnn::TensorInfo ret;
+ auto tensorDimensionSize = tfLiteTensor.dims->size;
+ if (tensorDimensionSize == 0)
+ {
+ armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+ else
+ {
+ std::vector<unsigned int> tensorDims(tensorDimensionSize);
+ bool dimensionsSpecificity[5] = { true, true, true, true, true };
+ for (unsigned int i = 0; i < tensorDimensionSize; ++i) {
+ auto dim = tfLiteTensor.dims->data[i];
+ if (dim == 0)
+ {
+ dimensionsSpecificity[i] = false;
+ }
+ tensorDims[i] = dim;
+ }
+ armnn::TensorShape tensorShape(tensorDimensionSize, tensorDims.data(), dimensionsSpecificity);
+ ret = armnn::TensorInfo(tensorShape, type);
+ }
+
+ auto quantizationInfo = tfLiteTensor.quantization;
+ if (quantizationInfo.type == kTfLiteAffineQuantization)
+ {
+ // get per-channel quantization parameters
+ const auto* affineQuantization =
+ reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+ std::vector<float> quantizationScales;
+ for (unsigned int i = 1; i < affineQuantization->scale->size; ++i)
+ {
+ quantizationScales.push_back(affineQuantization->scale->data[i]);
+ }
+ ret.SetQuantizationScales(quantizationScales);
+ ret.SetQuantizationDim(armnn::MakeOptional<unsigned int>(affineQuantization->quantized_dimension));
+ }
+ else
+ {
+ auto quantizationParameters = tfLiteTensor.params;
+ ret.SetQuantizationScale(quantizationParameters.scale);
+ ret.SetQuantizationOffset(quantizationParameters.zero_point);
+ }
+
+ return ret;
+}
+
+TfLiteStatus Connect(armnn::IConnectableLayer& layer,
+ TfLiteNode* tfLiteNode,
+ armnnDelegate::DelegateData& data)
+{
+ ARMNN_ASSERT(tfLiteNode->inputs->size == layer.GetNumInputSlots());
+ ARMNN_ASSERT(tfLiteNode->outputs->size == layer.GetNumOutputSlots());
+
+ // connect the input slots
+ for (unsigned int inputIndex = 0; inputIndex < layer.GetNumInputSlots(); ++inputIndex)
+ {
+ data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer.GetInputSlot(inputIndex));
+ }
+
+ // prepare output slots
+ for (unsigned int outputIndex = 0; outputIndex < layer.GetNumOutputSlots(); ++outputIndex)
+ {
+ armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(outputIndex);
+ data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
+ }
+ return kTfLiteOk;
+}
+
+} // namespace anonymous
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
new file mode 100644
index 0000000000..ff24012bdb
--- /dev/null
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t elementwiseBinaryOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
new file mode 100644
index 0000000000..7527fa1383
--- /dev/null
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DelegateUtils.hpp"
+
+#include <armnn/utility/Assert.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ armnn::UnaryOperation unaryOperation)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
+ bool isSupported = false;
+
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsElementwiseUnarySupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(*layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
new file mode 100644
index 0000000000..6e63d6d256
--- /dev/null
+++ b/delegate/src/Fill.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFillOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
new file mode 100644
index 0000000000..ad981cd63b
--- /dev/null
+++ b/delegate/src/FullyConnected.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
new file mode 100644
index 0000000000..a004f2cf12
--- /dev/null
+++ b/delegate/src/Gather.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t gatherOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
new file mode 100644
index 0000000000..f151c43573
--- /dev/null
+++ b/delegate/src/Lstm.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
new file mode 100644
index 0000000000..7a73e6154a
--- /dev/null
+++ b/delegate/src/Normalization.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitNormalizationOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t normalizationOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
new file mode 100644
index 0000000000..cb7652fb7c
--- /dev/null
+++ b/delegate/src/Pad.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPadOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t padOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
new file mode 100644
index 0000000000..ff3d668545
--- /dev/null
+++ b/delegate/src/Pooling.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPoolingOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t poolingOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
new file mode 100644
index 0000000000..31196233f9
--- /dev/null
+++ b/delegate/src/Quantization.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
new file mode 100644
index 0000000000..89a24b0aa0
--- /dev/null
+++ b/delegate/src/Redefine.hpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
new file mode 100644
index 0000000000..be40b64ad4
--- /dev/null
+++ b/delegate/src/Resize.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t resizeOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Round.hpp b/delegate/src/Round.hpp
new file mode 100644
index 0000000000..e3e2b4ca3a
--- /dev/null
+++ b/delegate/src/Round.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
new file mode 100644
index 0000000000..433a95e7bf
--- /dev/null
+++ b/delegate/src/Slice.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t sliceOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
new file mode 100644
index 0000000000..b22fa9bf67
--- /dev/null
+++ b/delegate/src/Softmax.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t softmaxOperatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
new file mode 100644
index 0000000000..4f69a73281
--- /dev/null
+++ b/delegate/src/SpaceDepth.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
new file mode 100644
index 0000000000..2d5823da84
--- /dev/null
+++ b/delegate/src/Transpose.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ return kTfLiteError;
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index f8a8aca139..5cbdb6f356 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -4,11 +4,115 @@
//
#include <armnn_delegate.hpp>
+
+#include "Activation.hpp"
+#include "ArgMinMax.hpp"
+#include "BatchSpace.hpp"
+#include "Comparison.hpp"
+#include "Convolution.hpp"
+#include "Control.hpp"
+#include "ElementwiseBinary.hpp"
+#include "ElementwiseUnary.hpp"
+#include "Fill.hpp"
+#include "FullyConnected.hpp"
+#include "Gather.hpp"
+#include "Lstm.hpp"
+#include "Normalization.hpp"
+#include "Pad.hpp"
+#include "Pooling.hpp"
+#include "Quantization.hpp"
+#include "Redefine.hpp"
+#include "Resize.hpp"
+#include "Round.hpp"
+#include "Slice.hpp"
+#include "Softmax.hpp"
+#include "SpaceDepth.hpp"
+#include "Transpose.hpp"
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/context_util.h>
+
#include <algorithm>
+#include <sstream>
namespace armnnDelegate
{
+DelegateOptions TfLiteArmnnDelegateOptionsDefault()
+{
+ DelegateOptions options(armnn::Compute::CpuRef);
+ return options;
+}
+
+TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
+{
+ auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
+ return armnnDelegate->GetDelegate();
+}
+
+void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
+{
+ if (tfLiteDelegate != nullptr)
+ {
+ delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
+ }
+}
+
+TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
+{
+ TfLiteIntArray* supportedOperators =
+ static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);
+
+ // ArmNN Delegate Registration
+ static const TfLiteRegistration kArmnnSubgraphRegistration = {
+ // ArmnnSubgraph Init
+ .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
+ const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
+
+ return static_cast<void*>(ArmnnSubgraph::Create(
+ tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
+ },
+ // ArmnnSubgraph Free
+ .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
+ if (buffer != nullptr)
+ {
+ delete static_cast<ArmnnSubgraph*>(buffer);
+ }
+ },
+ // ArmnnSubgraph Prepare
+ .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
+ if (tfLiteNode->user_data == nullptr)
+ {
+ return kTfLiteError;
+ }
+
+ return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
+ },
+ // ArmnnSubgraph Invoke
+ .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
+ if (tfLiteNode->user_data == nullptr)
+ {
+ return kTfLiteError;
+ }
+
+ return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
+ },
+
+ .profiling_string = nullptr,
+ .builtin_code = kTfLiteBuiltinDelegate,
+ .custom_name = "TfLiteArmNnDelegate",
+ .version = 1,
+ };
+
+ const TfLiteStatus status =
+ tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
+ tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);
+
+ TfLiteIntArrayFree(supportedOperators);
+ return status;
+
+}
+
Delegate::Delegate(armnnDelegate::DelegateOptions options)
: m_Runtime(nullptr, nullptr),
m_Options(std::move(options))
@@ -46,7 +150,7 @@ Delegate::Delegate(armnnDelegate::DelegateOptions options)
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
}
-TfLiteIntArray* Delegate::CollectOperatorsToDelegate(TfLiteContext* tfLiteContext)
+TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
{
TfLiteIntArray* executionPlan = nullptr;
if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
@@ -55,8 +159,8 @@ TfLiteIntArray* Delegate::CollectOperatorsToDelegate(TfLiteContext* tfLiteContex
return nullptr;
}
- // Null INetworkPtr
- armnn::INetworkPtr nullNetworkPtr(nullptr, nullptr);
+ // Delegate data with null network
+ DelegateData delegateData(m_Options.GetBackends());
TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
nodesToDelegate->size = 0;
@@ -77,7 +181,7 @@ TfLiteIntArray* Delegate::CollectOperatorsToDelegate(TfLiteContext* tfLiteContex
}
if (ArmnnSubgraph::VisitNode(
- nullNetworkPtr, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
+ delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
{
// node is not supported by ArmNN
continue;
@@ -86,9 +190,7 @@ TfLiteIntArray* Delegate::CollectOperatorsToDelegate(TfLiteContext* tfLiteContex
nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
}
- std::sort(&nodesToDelegate->data[0],
- &nodesToDelegate->data[nodesToDelegate->size]);
-
+ std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
return nodesToDelegate;
}
@@ -97,6 +199,60 @@ TfLiteDelegate* Delegate::GetDelegate()
return &m_Delegate;
}
+TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* inputs,
+ std::vector<armnn::BindingPointInfo>& inputBindings)
+{
+ const size_t numInputs = inputs->size;
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ const int32_t tensorId = inputs->data[i];
+ const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
+
+ auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
+
+ auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(tensorInfo);
+
+ // Store for creating connections
+ delegateData.m_OutputSlotForNode[tensorId] = &outputSlot;
+
+ // Do not create bindings for constant inputs
+ if (tensor.allocation_type != kTfLiteMmapRo)
+ {
+ inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
+ }
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const TfLiteIntArray* outputs,
+ std::vector<armnn::BindingPointInfo>& outputBindings)
+{
+ const size_t numOutputs = outputs->size;
+ for (unsigned int i = 0; i < numOutputs; ++i)
+ {
+ const int32_t tensorId = outputs->data[i];
+ const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
+
+ auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
+
+ auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
+
+ ARMNN_ASSERT(delegateData.m_OutputSlotForNode[tensorId] != nullptr);
+ delegateData.m_OutputSlotForNode[tensorId]->Connect(layer->GetInputSlot(0));
+ outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
+ }
+
+ return kTfLiteOk;
+}
+
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
const TfLiteDelegateParams* parameters,
const Delegate* delegate)
@@ -107,13 +263,28 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
return nullptr;
}
- // Construct ArmNN network
+ // Initialize DelegateData holds network and output slots information
+ DelegateData delegateData(delegate->m_Options.GetBackends());
+
+ // Build ArmNN Network
using NetworkOptions = std::vector<armnn::BackendOptions>;
armnn::NetworkOptions networkOptions = {};
armnn::NetworkId networkId;
- armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);
+ delegateData.m_Network = armnn::INetwork::Create(networkOptions);
- // Parse TfLite delegate nodes to ArmNN nodes
+ delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(parameters->nodes_to_replace->size, nullptr);
+
+ std::vector<armnn::BindingPointInfo> inputBindings;
+ std::vector<armnn::BindingPointInfo> outputBindings;
+
+ // Add input layer
+ auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
+ if (status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
+ }
+
+ // Parse TfLite delegate nodes to ArmNN
for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
{
const int nodeIndex = parameters->nodes_to_replace->data[i];
@@ -126,25 +297,59 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node registration: " + nodeIndex);
}
- if (VisitNode(network, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
+ if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
{
throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node: " + nodeIndex);
}
}
- // Optimise Arm NN network
- armnn::IOptimizedNetworkPtr optNet =
- armnn::Optimize(*network, delegate->m_Options.GetBackends(), delegate->m_Runtime->GetDeviceSpec());
+ // Add Output layer
+ status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
+ if (status != kTfLiteOk)
+ {
+ throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
+ }
+
+ // Optimize ArmNN network
+ armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+ try
+ {
+
+ optNet = armnn::Optimize(*(delegateData.m_Network),
+ delegate->m_Options.GetBackends(),
+ delegate->m_Runtime->GetDeviceSpec());
+ }
+ catch (std::exception &ex)
+ {
+ std::stringstream exMessage;
+ exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
+ throw armnn::Exception(exMessage.str());
+ }
if (!optNet)
{
- // Optimize Failed
+ // Optimize failed
throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
}
- // Load graph into runtime
- delegate->m_Runtime->LoadNetwork(networkId, std::move(optNet));
+
+ try
+ {
+ // Load graph into runtime
+ auto loadingStatus = delegate->m_Runtime->LoadNetwork(networkId, std::move(optNet));
+ if (loadingStatus != armnn::Status::Success)
+ {
+ // Optimize failed
+ throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded!");;
+ }
+ }
+ catch (std::exception& ex)
+ {
+ std::stringstream exMessage;
+ exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
+ throw armnn::Exception(exMessage.str());
+ }
// Create a new SubGraph with networkId and runtime
- return new ArmnnSubgraph(networkId, delegate->m_Runtime.get());
+ return new ArmnnSubgraph(networkId, delegate->m_Runtime.get(), inputBindings, outputBindings);
}
TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
@@ -152,34 +357,419 @@ TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
return kTfLiteOk;
}
-TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext)
+TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
{
- /// Get the Input Tensors and OutputTensors from the context
- /// Execute the network
- //m_Runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+ // Prepare inputs
+ armnn::InputTensors inputTensors;
+ size_t inputIndex = 0;
+ for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
+ {
+ TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
+ if (tensor->allocation_type != kTfLiteMmapRo)
+ {
+ const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
+ const armnn::ConstTensor inputTensor(inputBinding.second, tensor->data.data);
+ inputTensors.emplace_back(inputIdx, inputTensor);
- return kTfLiteOk;
+ ++inputIndex;
+ }
+ }
+
+ // Prepare outputs
+ armnn::OutputTensors outputTensors;
+ size_t outputIndex = 0;
+ for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
+ {
+ const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
+ TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
+ const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
+ outputTensors.emplace_back(outputIdx, outputTensor);
+
+ ++outputIndex;
+ }
+
+ // Run graph
+ auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+ return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
}
-TfLiteStatus ArmnnSubgraph::VisitNode(armnn::INetworkPtr& network,
+TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteRegistration* tfLiteRegistration,
TfLiteNode* tfLiteNode,
int nodeIndex)
{
- /*
- * Take the node and check what operator it is and VisitXXXLayer()
- * In the VisitXXXLayer() function parse TfLite node to Arm NN Layer and add it to tho network graph
- *switch (tfLiteRegistration->builtin_code)
- * {
- * case kTfLiteBuiltinAbs:
- * return VisitAbsLayer(...);
- * ...
- * default:
- * return kTfLiteError;
- * }
- */
- return kTfLiteError;
+ switch (tfLiteRegistration->builtin_code)
+ {
+ case kTfLiteBuiltinAbs:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Abs);
+ case kTfLiteBuiltinAdd:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinAdd);
+ case kTfLiteBuiltinArgMax:
+ return VisitArgMinMaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinArgMax);
+ case kTfLiteBuiltinArgMin:
+ return VisitArgMinMaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinArgMin);
+ case kTfLiteBuiltinAveragePool2d:
+ return VisitPoolingOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinAveragePool2d);
+ case kTfLiteBuiltinBatchToSpaceNd:
+ return VisitBatchToSpaceNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinBatchToSpaceNd);
+ case kTfLiteBuiltinConcatenation:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConcatenation);
+ case kTfLiteBuiltinConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinConv2d);
+ case kTfLiteBuiltinDepthToSpace:
+ return VisitDepthToSpaceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthToSpace);
+ case kTfLiteBuiltinDepthwiseConv2d:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDepthwiseConv2d);
+ case kTfLiteBuiltinDequantize:
+ return VisitDequantizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDequantize);
+ case kTfLiteBuiltinDiv:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinDiv);
+ case kTfLiteBuiltinElu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinElu);
+ case kTfLiteBuiltinEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinEqual);
+ case kTfLiteBuiltinExp:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Exp);
+ case kTfLiteBuiltinExpandDims:
+ return VisitExpandDimsOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinExpandDims);
+ case kTfLiteBuiltinFill:
+ return VisitFillOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFill);
+ case kTfLiteBuiltinFloor:
+ return VisitFloorOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFloor);
+ case kTfLiteBuiltinFullyConnected:
+ return VisitFullyConnectedOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinFullyConnected);
+ case kTfLiteBuiltinGather:
+ return VisitGatherOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGather);
+ case kTfLiteBuiltinGatherNd:
+ return VisitGatherOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGatherNd);
+ case kTfLiteBuiltinGreater:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreater);
+ case kTfLiteBuiltinGreaterEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinGreaterEqual);
+ case kTfLiteBuiltinHardSwish:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinHardSwish);
+ case kTfLiteBuiltinL2Normalization:
+ return VisitNormalizationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinL2Normalization);
+ case kTfLiteBuiltinL2Pool2d:
+ return VisitPoolingOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinL2Pool2d);
+ case kTfLiteBuiltinLess:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLess);
+ case kTfLiteBuiltinLessEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLessEqual);
+ case kTfLiteBuiltinLocalResponseNormalization:
+ return VisitNormalizationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLocalResponseNormalization);
+ case kTfLiteBuiltinLogistic:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogistic);
+ case kTfLiteBuiltinLogSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLogSoftmax);
+ case kTfLiteBuiltinLstm:
+ return VisitLstmOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinLstm);
+ case kTfLiteBuiltinMaxPool2d:
+ return VisitPoolingOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMaxPool2d);
+ case kTfLiteBuiltinMaximum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMaximum);
+ case kTfLiteBuiltinMean:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMean);
+ case kTfLiteBuiltinMinimum:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMinimum);
+ case kTfLiteBuiltinMul:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMul);
+ case kTfLiteBuiltinNeg:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Neg);
+ case kTfLiteBuiltinNotEqual:
+ return VisitComparisonOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinNotEqual);
+ case kTfLiteBuiltinPad:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPad);
+ case kTfLiteBuiltinPadv2:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPadv2);
+ case kTfLiteBuiltinPrelu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinPrelu);
+ case kTfLiteBuiltinQuantize:
+ return VisitQuantizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinQuantize);
+ case kTfLiteBuiltinRank:
+ return VisitControlOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRank);
+ case kTfLiteBuiltinRelu:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRelu);
+ case kTfLiteBuiltinReluN1To1:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReluN1To1);
+ case kTfLiteBuiltinRelu6:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinRelu6);
+ case kTfLiteBuiltinReshape:
+ return VisitReshapeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinReshape);
+ case kTfLiteBuiltinResizeBilinear:
+ return VisitResizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinResizeBilinear);
+ case kTfLiteBuiltinResizeNearestNeighbor:
+ return VisitResizeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinResizeNearestNeighbor);
+ case kTfLiteBuiltinRsqrt:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Rsqrt);
+ case kTfLiteBuiltinSqrt:
+ return VisitElementwiseUnaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ armnn::UnaryOperation::Sqrt);
+ case kTfLiteBuiltinSqueeze:
+ return VisitSqueezeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSqueeze);
+ case kTfLiteBuiltinStridedSlice:
+ return VisitSliceOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinStridedSlice);
+ case kTfLiteBuiltinTranspose:
+ return VisitTransposeOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTranspose);
+ case kTfLiteBuiltinTransposeConv:
+ return VisitConvolutionOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTransposeConv);
+ case kTfLiteBuiltinSoftmax:
+ return VisitSoftmaxOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSoftmax);
+ case kTfLiteBuiltinSpaceToBatchNd:
+ return VisitSpaceToBatchNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToBatchNd);
+ case kTfLiteBuiltinSpaceToDepth:
+ return VisitSpaceToDepthOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToDepth);
+ case kTfLiteBuiltinSub:
+ return VisitElementwiseBinaryOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSub);
+ case kTfLiteBuiltinTanh:
+ return VisitActivationOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTanh);
+ default:
+ return kTfLiteError;
+ }
}
} // armnnDelegate namespace \ No newline at end of file
diff --git a/delegate/src/test/AbsTest.cpp b/delegate/src/test/AbsTest.cpp
new file mode 100644
index 0000000000..f9c345e6d2
--- /dev/null
+++ b/delegate/src/test/AbsTest.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("AbsTest")
+{
+
+TEST_CASE ("AbsTestFloat32")
+{
+ using namespace tflite;
+
+ const std::vector<int32_t> inputShape { { 3, 1, 2} };
+ std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(BuiltinOperator_ABS,
+ ::tflite::TensorType_FLOAT32,
+ inputShape);
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ auto delegateOptions = TfLiteArmnnDelegateOptionsDefault();
+ auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
+ CHECK(armnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
+
+ // Set input data
+ std::vector<float> inputValues
+ {
+ -0.1f, -0.2f, -0.3f,
+ 0.1f, 0.2f, 0.3f
+ };
+ auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+ auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ tfLiteDelageInputData[i] = inputValues[i];
+ }
+
+ auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ armnnDelegateInputData[i] = inputValues[i];
+ }
+
+ // Run EnqueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+ for (size_t i = 0; i < inputValues.size(); i++)
+ {
+ CHECK(std::abs(inputValues[i]) == armnnDelegateOutputData[i]);
+ CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
+ }
+}
+
+}
+
+} // namespace armnnDelegate
+
+
+
diff --git a/delegate/src/test/ArmnnDelegateTest.cpp b/delegate/src/test/ArmnnDelegateTest.cpp
index 8bd58f6286..fdf786ff99 100644
--- a/delegate/src/test/ArmnnDelegateTest.cpp
+++ b/delegate/src/test/ArmnnDelegateTest.cpp
@@ -3,20 +3,17 @@
// SPDX-License-Identifier: MIT
//
-#include <armnn_delegate.hpp>
-
-#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
-#endif
#include <doctest/doctest.h>
+#include <armnn_delegate.hpp>
+
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include <tensorflow/lite/interpreter.h>
-namespace
+namespace armnnDelegate
{
-
TEST_SUITE("ArmnnDelegate")
{
@@ -50,5 +47,4 @@ TEST_CASE ("ArmnnDelegate Registered")
}
-} // anonymous namespace
-
+} // namespace armnnDelegate
diff --git a/delegate/src/test/SqrtTest.cpp b/delegate/src/test/SqrtTest.cpp
new file mode 100644
index 0000000000..df3534dcdb
--- /dev/null
+++ b/delegate/src/test/SqrtTest.cpp
@@ -0,0 +1,97 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("SqrtTest")
+{
+
+TEST_CASE ("SqrtTestFloat32")
+{
+ using namespace tflite;
+ const std::vector<int32_t> inputShape { { 3, 1, 2} };
+ std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(BuiltinOperator_SQRT,
+ ::tflite::TensorType_FLOAT32,
+ inputShape);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ auto delegateOptions = TfLiteArmnnDelegateOptionsDefault();
+ auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions);
+ CHECK(armnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk);
+
+ // Set input data
+ std::vector<float> inputValues
+ {
+ 9.0f, 4.25f, 81.9f,
+ 0.1f, 0.9f, 169.0f
+ };
+
+ auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+ auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ tfLiteDelageInputData[i] = inputValues[i];
+ }
+
+ auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+ auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ armnnDelegateInputData[i] = inputValues[i];
+ }
+
+ // Run EnqueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+ for (size_t i = 0; i < inputValues.size(); i++)
+ {
+ CHECK(std::sqrt(inputValues[i]) == armnnDelegateOutputData[i]);
+ CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
+ }
+
+}
+
+}
+
+} // namespace armnnDelegate
+
+
+