author    Sadik Armagan <sadik.armagan@arm.com>  2021-04-21 14:03:28 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2021-04-23 06:55:49 +0000
commit    937565b90bb33eea785898c44db2942dd7af56e7 (patch)
tree      c033c6a364a24475e7f8e723519b26ddfddaedc3
parent    48f011e1d5337ff580ae2c315f670bfc15413598 (diff)
download  armnn-937565b90bb33eea785898c44db2942dd7af56e7.tar.gz
IVGCVSW-5430 'Add CAST Operator Support to Delegate'
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6da711950b8e7d3c0d5cbd443e91eb36700ac4c8
-rw-r--r--  delegate/CMakeLists.txt                 2
-rw-r--r--  delegate/src/Redefine.hpp              58
-rw-r--r--  delegate/src/armnn_delegate.cpp         6
-rw-r--r--  delegate/src/test/CastTest.cpp        107
-rw-r--r--  delegate/src/test/CastTestHelper.hpp  157
-rw-r--r--  docs/01_03_delegate.dox                 2
6 files changed, 332 insertions, 0 deletions
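
For context, the snippet below is a minimal usage sketch (not part of this patch) of how an application hands a TfLite graph containing a CAST node to the Arm NN delegate, using the same entry points exercised by CastTestHelper.hpp further down (DelegateOptions, TfLiteArmnnDelegateCreate, TfLiteArmnnDelegateDelete, ModifyGraphWithDelegate). The model path "cast_model.tflite" and the CpuRef backend choice are placeholders, not values taken from this commit.

// Minimal sketch: delegate a model containing a CAST operator to Arm NN.
// Assumptions: "cast_model.tflite" is a placeholder path; CpuRef is one of the
// backends exercised by the tests in this patch.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>
#include <vector>

int main()
{
    // Load a .tflite model that contains a CAST operator (placeholder path).
    auto model = tflite::FlatBufferModel::BuildFromFile("cast_model.tflite");
    if (!model)
    {
        return 1;
    }

    // Build a standard TfLite interpreter for the model.
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
    {
        return 1;
    }
    interpreter->AllocateTensors();

    // Create the Arm NN delegate for the chosen backend and hand the graph to it.
    // With this patch applied, CAST nodes are claimed by the delegate via VisitCastOperator.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    if (interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) != kTfLiteOk)
    {
        return 1;
    }

    // Fill the interpreter's input tensors here, then run inference.
    return interpreter->Invoke() == kTfLiteOk ? 0 : 1;
}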
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index d72089ca85..fea07b37ec 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -118,6 +118,8 @@ if(BUILD_UNIT_TESTS)
src/test/ArmnnDelegateTest.cpp
src/test/BatchSpaceTest.cpp
src/test/BatchSpaceTestHelper.hpp
+ src/test/CastTest.cpp
+ src/test/CastTestHelper.hpp
src/test/ComparisonTest.cpp
src/test/ComparisonTestHelper.hpp
src/test/ControlTest.cpp
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index 3df26cacc3..766e600c7c 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -18,6 +18,64 @@
namespace armnnDelegate
{
+TfLiteStatus VisitCastOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsCastSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo,
+ outInfo);
+ };
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify
+    // support for the operator.
+    // If supported, VisitCastOperator will be called again to add the layer to the network as seen further below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a Cast layer
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+
TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
const std::vector<int32_t>& targetShape,
armnn::ReshapeDescriptor& reshapeDesc)
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 2b07fc7098..4c625a9867 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -486,6 +486,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinBatchToSpaceNd);
+ case kTfLiteBuiltinCast:
+ return VisitCastOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinCast);
case kTfLiteBuiltinConcatenation:
return VisitControlOperator(delegateData,
tfLiteContext,
diff --git a/delegate/src/test/CastTest.cpp b/delegate/src/test/CastTest.cpp
new file mode 100644
index 0000000000..623c045247
--- /dev/null
+++ b/delegate/src/test/CastTest.cpp
@@ -0,0 +1,107 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "CastTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void CastUint8ToFp32Test(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> inputShape {1, 3, 2, 3};
+
+ std::vector<uint8_t> inputValues { 1, 3, 1, 3, 1, 3, 1, 3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+
+ std::vector<float> expectedOutputValues { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+
+ CastTest<uint8_t, float>(::tflite::TensorType_UINT8,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ inputValues,
+ expectedOutputValues);
+}
+
+void CastInt32ToFp32Test(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> inputShape {1, 3, 2, 3};
+
+ std::vector<int32_t> inputValues { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+ 3, 1, 3, 1, 2, 1, 3, 1, 3 };
+
+ std::vector<float> expectedOutputValues { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+ 3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+
+ CastTest<int32_t, float>(::tflite::TensorType_INT32,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ inputValues,
+ expectedOutputValues);
+}
+
+// CAST Test Suite
+TEST_SUITE("CAST_CpuRefTests")
+{
+
+TEST_CASE ("CAST_UINT8_TO_FP32_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ CastUint8ToFp32Test(backends);
+}
+
+TEST_CASE ("CAST_INT32_TO_FP32_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ CastInt32ToFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("CAST_CpuAccTests")
+{
+
+TEST_CASE ("CAST_UINT8_TO_FP32_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ CastUint8ToFp32Test(backends);
+}
+
+TEST_CASE ("CAST_INT32_TO_FP32_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ CastInt32ToFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("CAST_GpuAccTests")
+{
+
+TEST_CASE ("CAST_UINT8_TO_FP32_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ CastUint8ToFp32Test(backends);
+}
+
+TEST_CASE ("CAST_INT32_TO_FP32_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ CastInt32ToFp32Test(backends);
+}
+
+}
+// End of CAST Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/CastTestHelper.hpp b/delegate/src/test/CastTestHelper.hpp
new file mode 100644
index 0000000000..6b1d5ee947
--- /dev/null
+++ b/delegate/src/test/CastTestHelper.hpp
@@ -0,0 +1,157 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
+ tflite::TensorType outputTensorType,
+ const std::vector <int32_t>& tensorShape,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+ auto quantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({quantScale}),
+ flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+ std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+ tensorShape.size()),
+ inputTensorType,
+ 0,
+ flatBufferBuilder.CreateString("input"),
+ quantizationParameters);
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+ tensorShape.size()),
+ outputTensorType,
+ 0,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
+
+ const std::vector<int32_t> operatorInputs({0});
+ const std::vector<int32_t> operatorOutputs({1});
+
+ flatbuffers::Offset<Operator> castOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ BuiltinOptions_CastOptions,
+ CreateCastOptions(flatBufferBuilder).Union());
+
+ flatbuffers::Offset<flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: CAST Operator Model");
+ flatbuffers::Offset<OperatorCode> operatorCode =
+ CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CAST);
+
+ const std::vector<int32_t> subgraphInputs({0});
+ const std::vector<int32_t> subgraphOutputs({1});
+ flatbuffers::Offset<SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&castOperator, 1));
+
+ flatbuffers::Offset<Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T, typename K>
+void CastTest(tflite::TensorType inputTensorType,
+ tflite::TensorType outputTensorType,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& shape,
+ std::vector<T>& inputValues,
+ std::vector<K>& expectedOutputValues,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
+ outputTensorType,
+ shape,
+ quantScale,
+ quantOffset);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegate;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegate) == kTfLiteOk);
+ CHECK(armnnDelegate != nullptr);
+ CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteDelegate;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteDelegate) == kTfLiteOk);
+ CHECK(tfLiteDelegate != nullptr);
+ CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+ armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run EnqueueWorkload
+ CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+ // Compare output data
+ armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
+ armnnDelegate,
+ shape,
+ expectedOutputValues,
+ 0);
+
+ tfLiteDelegate.reset(nullptr);
+ armnnDelegate.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/docs/01_03_delegate.dox b/docs/01_03_delegate.dox
index f6d8e7660d..a90883ba84 100644
--- a/docs/01_03_delegate.dox
+++ b/docs/01_03_delegate.dox
@@ -45,6 +45,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operators:
- BATCH_TO_SPACE_ND
+- CAST
+
- CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE
- CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE