From 89c5a9e6ecfa169512c43e659b1833f9a3c41d90 Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Wed, 20 Jan 2021 17:48:07 +0000 Subject: IVGCVSW-5391 'ArmNN TfLiteDelegate: Implement the Space/Depth operators' * Added DEPTH_TO_SPACE and SPACE_TO_DEPTH operators support Signed-off-by: Sadik Armagan Change-Id: I2595f759181bd7339127e7b114b850b534210dd5 --- delegate/CMakeLists.txt | 2 + delegate/TensorFlowLiteDelegateSupport.md | 4 + delegate/src/SpaceDepth.hpp | 114 ++++++++++++++-- delegate/src/test/SpaceDepthTest.cpp | 207 +++++++++++++++++++++++++++++ delegate/src/test/SpaceDepthTestHelper.hpp | 166 +++++++++++++++++++++++ 5 files changed, 479 insertions(+), 14 deletions(-) create mode 100644 delegate/src/test/SpaceDepthTest.cpp create mode 100644 delegate/src/test/SpaceDepthTestHelper.hpp (limited to 'delegate') diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt index 2862faf9e6..eda5b935e3 100644 --- a/delegate/CMakeLists.txt +++ b/delegate/CMakeLists.txt @@ -145,6 +145,8 @@ if(BUILD_UNIT_TESTS) src/test/ResizeTestHelper.hpp src/test/SoftmaxTest.cpp src/test/SoftmaxTestHelper.hpp + src/test/SpaceDepthTest.cpp + src/test/SpaceDepthTestHelper.hpp src/test/SplitTest.cpp src/test/SplitTestHelper.hpp src/test/TestUtils.hpp diff --git a/delegate/TensorFlowLiteDelegateSupport.md b/delegate/TensorFlowLiteDelegateSupport.md index ed1124a1ff..a5d4faf3ef 100644 --- a/delegate/TensorFlowLiteDelegateSupport.md +++ b/delegate/TensorFlowLiteDelegateSupport.md @@ -20,6 +20,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato * CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE +* DEPTH_TO_SPACE + * DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE * DEQUANTIZE @@ -94,6 +96,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato * SOFTMAX +* SPACE_TO_DEPTH + * SPLIT * SPLIT_V diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp index 
603e0f2fff..03859b6fcb 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -5,8 +5,6 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
@@ -21,13 +19,57 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
                                        int nodeIndex,
                                        int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::SpaceToDepthDescriptor descriptor;
+    auto* params = reinterpret_cast<TfLiteSpaceToDepthParams*>(tfLiteNode->builtin_data);
+    descriptor.m_BlockSize = params->block_size;
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsSpaceToDepthSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a SpaceToDepth layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
 
-    return kTfLiteError;
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
 
 TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
@@ -36,13 +78,57 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
                                        int nodeIndex,
                                        int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::DepthToSpaceDescriptor descriptor;
+    auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(tfLiteNode->builtin_data);
+    descriptor.m_BlockSize = params->block_size;
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsDepthToSpaceSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a DepthToSpace layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
 
-    return kTfLiteError;
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/SpaceDepthTest.cpp b/delegate/src/test/SpaceDepthTest.cpp
new file mode 100644
index 0000000000..f80e749b87
--- /dev/null
+++ b/delegate/src/test/SpaceDepthTest.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceDepthTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void DepthToSpaceFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 4, 4, 1 };
+
+    std::vector<float> inputValues = { 1.f,  2.f,  3.f,  4.f,
+                                       5.f,  6.f,  7.f,  8.f,
+                                       9.f, 10.f, 11.f, 12.f,
+                                       13.f, 14.f, 15.f, 16.f };
+
+    std::vector<float> expectedOutputValues = { 1.f,  2.f,  5.f,  6.f,
+                                                3.f,  4.f,  7.f,  8.f,
+                                                9.f, 10.f, 13.f, 14.f,
+                                                11.f, 12.f, 15.f, 16.f };
+
+    SpaceDepthTest<float>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          blockSize);
+}
+
+void DepthToSpaceUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 1, 1, 4 };
+    std::vector<int32_t> outputShape { 2, 2, 2, 1 };
+
+    std::vector<uint8_t> inputValues = { 1, 2, 3, 4,
+                                         5, 6, 7, 8 };
+
+    std::vector<uint8_t> expectedOutputValues = { 1, 2, 3, 4,
+                                                  5, 6, 7, 8 };
+
+    SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            outputShape,
+                            inputValues,
+                            expectedOutputValues,
+                            blockSize);
+}
+
+void SpaceToDepthFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 2 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 8 };
+
+    std::vector<float> inputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
+    std::vector<float> expectedOutputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
+
+    SpaceDepthTest<float>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          blockSize);
+}
+
+void SpaceToDepthUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 4 };
+
+    std::vector<uint8_t> inputValues = { 1, 2, 3, 2 };
+    std::vector<uint8_t> expectedOutputValues = { 1, 2, 3, 2 };
+
+    SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            outputShape,
+                            inputValues,
+                            expectedOutputValues,
+                            blockSize);
+}
+
+TEST_SUITE("DepthToSpace_CpuRefTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_CpuRefTests")
+
+
+TEST_SUITE("DepthToSpace_CpuAccTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_CpuAccTests")
+
+TEST_SUITE("DepthToSpace_GpuAccTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_GpuAccTests")
+
+TEST_SUITE("SpaceToDepth_CpuRefTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_CpuRefTests")
+
+TEST_SUITE("SpaceToDepth_CpuAccTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_CpuAccTests")
+
+TEST_SUITE("SpaceToDepth_GpuAccTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_GpuAccTests")
+
+} // namespace armnnDelegate
diff --git a/delegate/src/test/SpaceDepthTestHelper.hpp b/delegate/src/test/SpaceDepthTestHelper.hpp
new file mode 100644
index 0000000000..d9a783c6a7
--- /dev/null
+++ b/delegate/src/test/SpaceDepthTestHelper.hpp
@@ -0,0 +1,166 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepthOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& inputTensorShape,
+                                              const std::vector <int32_t>& outputTensorShape,
+                                              int32_t blockSize)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> spaceDepthOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (spaceDepthOperatorCode)
+    {
+        case tflite::BuiltinOperator_SPACE_TO_DEPTH:
+            spaceDepthOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_SpaceToDepthOptions,
+                               CreateSpaceToDepthOptions(flatBufferBuilder, blockSize).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: SPACE_TO_DEPTH Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_SPACE_TO_DEPTH);
+            break;
+        case tflite::BuiltinOperator_DEPTH_TO_SPACE:
+            spaceDepthOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_DepthToSpaceOptions,
+                               CreateDepthToSpaceOptions(flatBufferBuilder, blockSize).Union());
+            // NOTE(review): original discarded the CreateString() result here, leaving
+            // modelDescription unset for DEPTH_TO_SPACE models; assign it as in the case above.
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: DEPTH_TO_SPACE Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_DEPTH_TO_SPACE);
+            break;
+        default:
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&spaceDepthOperator, 1));
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& inputShape,
+                    std::vector<int32_t>& outputShape,
+                    std::vector<T>& inputValues,
+                    std::vector<T>& expectedOutputValues,
+                    int32_t blockSize = 2)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
+                                                                tensorType,
+                                                                inputShape,
+                                                                outputShape,
+                                                                blockSize);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
-- 
cgit v1.2.1