From 3504e425ac99467c80919768c4a1361c44b30353 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan <matthew.sloyan@arm.com>
Date: Wed, 3 May 2023 13:53:02 +0100
Subject: IVGCVSW-7605 IVGCVSW-7604 Implement Squeeze and ExpandDims operators
 for Classic and Opaque Delegate

* Implemented the previously unsupported operators in the Classic and
  Opaque Delegates by lowering both to ArmNN Reshape layers.
* Added unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib39eeea53c114b15943e8dc2e796ce64c40cb3a5
---
 delegate/CMakeLists.txt                |   4 +
 delegate/classic/src/Redefine.hpp      | 187 ++++++++++++++++++++++++--
 delegate/common/src/DelegateUtils.hpp  |  52 ++++++++
 delegate/opaque/src/Redefine.hpp       | 237 +++++++++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp |  12 ++
 delegate/test/ExpandDimsTest.cpp       | 102 ++++++++++++++
 delegate/test/RedefineTestHelper.hpp   | 166 +++++++++++++++++++++--
 delegate/test/SqueezeTest.cpp          | 102 ++++++++++++++
 8 files changed, 838 insertions(+), 24 deletions(-)
 create mode 100644 delegate/test/ExpandDimsTest.cpp
 create mode 100644 delegate/test/SqueezeTest.cpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 003dffa807..ef049133ca 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -150,6 +150,7 @@ if(BUILD_UNIT_TESTS)
         test/ElementwiseBinaryTestHelper.hpp
         test/ElementwiseUnaryTest.cpp
         test/ElementwiseUnaryTestHelper.hpp
+        test/ExpandDimsTest.cpp
         test/FillTest.cpp
         test/FillTestHelper.hpp
         test/FullyConnectedTest.cpp
@@ -193,6 +194,7 @@ if(BUILD_UNIT_TESTS)
         test/ShapeTestHelper.hpp
         test/SliceTest.cpp
         test/SliceTestHelper.hpp
+        test/SqueezeTest.cpp
         test/StridedSliceTest.cpp
         test/StridedSliceTestHelper.hpp
         test/SplitTest.cpp
@@ -280,6 +282,7 @@ if(BUILD_UNIT_TESTS)
         test/DepthwiseConvolution2dTest.cpp
         test/ElementwiseUnaryTestHelper.hpp
         test/ElementwiseUnaryTest.cpp
+        test/ExpandDimsTest.cpp
         test/FullyConnectedTest.cpp
         test/FullyConnectedTestHelper.hpp
         test/GatherTest.cpp
@@ -319,6 +322,7 @@ if(BUILD_UNIT_TESTS)
         test/SoftmaxTestHelper.hpp
         test/SpaceDepthTest.cpp
         test/SpaceDepthTestHelper.hpp
+        test/SqueezeTest.cpp
         test/StridedSliceTest.cpp
         test/StridedSliceTestHelper.hpp
         test/TestUtils.hpp
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 41c62c33c8..2c29083719 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -5,8 +5,6 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
@@ -231,13 +229,83 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
                                   int nodeIndex,
                                   int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* options = reinterpret_cast<TfLiteSqueezeParams*>(tfLiteNode->builtin_data);
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+    std::vector<uint32_t> squeezeDim;
+    // A single negative dim index is interpreted as a Python-style negative index:
+    // the resolved index is the shape size plus the negative index value.
+    if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
+    {
+        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) +
+                      options->squeeze_dims[0];
+        squeezeDim.push_back(static_cast<uint32_t>(dim));
+    }
+    else
+    {
+        for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+        {
+            squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+        }
+    }
+
+    armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("SQUEEZE",
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
 
-    return kTfLiteError;
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
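Note on the negative-dim handling above: squeeze_dims uses Python-style indexing, so
for a rank-4 input a single entry of -1 resolves to 4 + (-1) = 3, and dimension 3 is
squeezed. The patch applies this mapping only when exactly one, negative dim is
supplied. A minimal standalone sketch of the same normalisation (the helper name is
illustrative and is not part of the patch):

    #include <cstdint>
    #include <vector>

    // Map TfLite squeeze_dims onto non-negative axis indices for a tensor of
    // rank numDims; a negative entry counts back from the rank (Python-style).
    std::vector<uint32_t> ResolveSqueezeDims(const std::vector<int32_t>& squeezeDims, int32_t numDims)
    {
        std::vector<uint32_t> resolved;
        for (int32_t dim : squeezeDims)
        {
            resolved.push_back(static_cast<uint32_t>(dim < 0 ? numDims + dim : dim));
        }
        return resolved; // e.g. { -1 } with numDims == 4 -> { 3 }
    }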
@@ -246,13 +314,104 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
                                      int nodeIndex,
                                      int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
 
-    return kTfLiteError;
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+    int32_t axis = axisTensorData[0];
+
+    int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+    if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Axis must be in range "
+            "[-(inputDimSize + 1), inputDimSize] inclusive.");
+        return kTfLiteError;
+    }
+
+    if (axis < 0)
+    {
+        axis = inputDimSize + axis + 1;
+    }
+
+    std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
+    unsigned int inputShapeIndex = 0;
+    for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
+    {
+        if (i == static_cast<unsigned int>(axis))
+        {
+            shape[i] = 1;
+        }
+        else
+        {
+            shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
+            ++inputShapeIndex;
+        }
+    }
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1),
+                                                   shape.data());
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("EXPAND_DIMS",
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
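The axis handling in VisitExpandDimsOperator above mirrors tf.expand_dims: a valid
axis lies in [-(inputDimSize + 1), inputDimSize], and a negative axis is normalised
to inputDimSize + axis + 1, so -1 appends a new trailing dimension. An illustrative
sketch of the shape construction in isolation (the helper name is assumed, not part
of the patch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Build the EXPAND_DIMS output shape by inserting a 1 at the (already
    // normalised, non-negative) axis; e.g. { 1, 2, 2 } with axis 3 -> { 1, 2, 2, 1 }.
    std::vector<uint32_t> ExpandShape(const std::vector<uint32_t>& inputShape, std::size_t axis)
    {
        std::vector<uint32_t> outputShape(inputShape.size() + 1);
        std::size_t inputIndex = 0;
        for (std::size_t i = 0; i < outputShape.size(); ++i)
        {
            outputShape[i] = (i == axis) ? 1u : inputShape[inputIndex++];
        }
        return outputShape;
    }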
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 37fe9b5b84..1671a4c8cf 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -169,4 +169,56 @@ TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
     return kTfLiteOk;
 }
 
+armnn::TensorInfo OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
+                                       const armnn::TensorInfo& inputTensorInfo)
+{
+    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    if (inputTensorInfo.GetNumDimensions() > 4)
+    {
+        std::stringstream ss;
+        ss << "Input tensor has unexpected number of dimensions:"
+           << inputTensorInfo.GetNumDimensions()
+           << " shape:" << inputTensorInfo.GetShape()
+           << " "
+           << CHECK_LOCATION().AsString();
+        throw armnn::ParseException(ss.str());
+    }
+
+    if (squeezeDims.empty())
+    {
+        squeezeDims.assign(dimensionSequence,
+                           dimensionSequence + inputTensorInfo.GetNumDimensions());
+    }
+
+    std::vector<uint32_t> outputDims;
+    for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
+    {
+        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
+        auto currentDimension = inputTensorInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    if (outputDims.size() > 4)
+    {
+        std::stringstream ss;
+        ss << "Output tensor has unexpected number of dimensions:"
+           << outputDims.size()
+           << " input shape:" << inputTensorInfo.GetShape()
+           << " "
+           << CHECK_LOCATION().AsString();
+        throw armnn::ParseException(ss.str());
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
+                                                     outputDims.data());
+
+    // We need to preserve the tensor type and the quantization data as well
+    armnn::TensorInfo outTensorInfo = inputTensorInfo;
+    outTensorInfo.SetShape(outShape);
+
+    return outTensorInfo;
+}
+
 } // namespace anonymous
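OutputShapeOfSqueeze above removes a listed dimension only when its extent is 1, and
an empty squeezeDims list means every size-1 dimension is removed; data type and
quantization parameters are copied over from the input unchanged. A minimal usage
sketch, assuming the ArmNN types already available to this header:

    void OutputShapeOfSqueezeExamples()
    {
        armnn::TensorInfo info(armnn::TensorShape({ 1, 2, 2, 1 }), armnn::DataType::Float32);

        armnn::TensorInfo a = OutputShapeOfSqueeze({},    info); // all size-1 dims removed -> { 2, 2 }
        armnn::TensorInfo b = OutputShapeOfSqueeze({ 3 }, info); // only dim 3 removed -> { 1, 2, 2 }
        armnn::TensorInfo c = OutputShapeOfSqueeze({ 1 }, info); // dim 1 has extent 2, kept -> { 1, 2, 2, 1 }
    }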
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index dc424cff00..ce90af0812 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -259,4 +259,241 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
     return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
 }
 
+TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
+                                  TfLiteOpaqueContext* tfLiteContext,
+                                  TfLiteOpaqueNode* tfLiteNode,
+                                  int nodeIndex,
+                                  int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* options = reinterpret_cast<TfLiteSqueezeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+
+    std::vector<uint32_t> squeezeDim;
+    // A single negative dim index is interpreted as a Python-style negative index:
+    // the resolved index is the shape size plus the negative index value.
+    if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
+    {
+        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) +
+                      options->squeeze_dims[0];
+        squeezeDim.push_back(static_cast<uint32_t>(dim));
+    }
+    else
+    {
+        for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+        {
+            squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+        }
+    }
+
+    armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SQUEEZE",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
+                                     TfLiteOpaqueContext* tfLiteContext,
+                                     TfLiteOpaqueNode* tfLiteNode,
+                                     int nodeIndex,
+                                     int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensors.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor);
+
+    auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
+    int32_t axis = axisTensorData[0];
+
+    int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+    if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Axis must be in range "
+            "[-(inputDimSize + 1), inputDimSize] inclusive.");
+        return kTfLiteError;
+    }
+
+    if (axis < 0)
+    {
+        axis = inputDimSize + axis + 1;
+    }
+
+    std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
+    unsigned int inputShapeIndex = 0;
+    for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
+    {
+        if (i == static_cast<unsigned int>(axis))
+        {
+            shape[i] = 1;
+        }
+        else
+        {
+            shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
+            ++inputShapeIndex;
+        }
+    }
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1),
+                                                   shape.data());
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("EXPAND_DIMS",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index f7476d17c5..cae1ea502b 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -764,6 +764,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                       nodeIndex,
                                       kTfLiteBuiltinExp,
                                       armnn::UnaryOperation::Exp);
+        case kTfLiteBuiltinExpandDims:
+            return VisitExpandDimsOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinExpandDims);
         case kTfLiteBuiltinFloor:
             return VisitFloorOperator(delegateData,
                                       tfLiteContext,
@@ -1089,6 +1095,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                       nodeIndex,
                                       kTfLiteBuiltinSqrt,
                                       armnn::UnaryOperation::Sqrt);
+        case kTfLiteBuiltinSqueeze:
+            return VisitSqueezeOperator(delegateData,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        nodeIndex,
+                                        kTfLiteBuiltinSqueeze);
         case kTfLiteBuiltinStridedSlice:
             return VisitStridedSliceOperator(delegateData,
                                              tfLiteContext,
diff --git a/delegate/test/ExpandDimsTest.cpp b/delegate/test/ExpandDimsTest.cpp
new file mode 100644
index 0000000000..8c21f731cc
--- /dev/null
+++ b/delegate/test/ExpandDimsTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+namespace armnnDelegate
+{
+
+void ExpandDimsSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> axis { 0 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        axis);
+}
+
+void ExpandDimsWithNegativeAxisTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> axis { -1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        axis);
+}
+
+TEST_SUITE("ExpandDims_GpuAccTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_GpuAccTests")
+
+TEST_SUITE("ExpandDims_CpuAccTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_CpuAccTests")
+
+TEST_SUITE("ExpandDims_CpuRefTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
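Both SQUEEZE and EXPAND_DIMS are lowered to a single ArmNN ReshapeLayer, which is
why IsReshapeSupported is the capability check in every visit function above. For
reference, a standalone ArmNN graph equivalent to the delegated SQUEEZE of a
{ 1, 2, 2, 1 } Float32 tensor might look roughly like this (a sketch against the
public ArmNN API, not code from the patch):

    #include <armnn/ArmNN.hpp>

    armnn::INetworkPtr BuildSqueezeAsReshape()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        armnn::TensorInfo inputInfo(armnn::TensorShape({ 1, 2, 2, 1 }), armnn::DataType::Float32);
        armnn::TensorInfo outputInfo(armnn::TensorShape({ 2, 2 }), armnn::DataType::Float32);

        // The squeezed shape becomes the reshape target, exactly as in the delegate.
        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = outputInfo.GetShape();

        armnn::IConnectableLayer* input   = network->AddInputLayer(0);
        armnn::IConnectableLayer* reshape = network->AddReshapeLayer(reshapeDesc, "squeeze");
        armnn::IConnectableLayer* output  = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(reshape->GetInputSlot(0));
        reshape->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        reshape->GetOutputSlot(0).SetTensorInfo(outputInfo);

        return network;
    }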
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index 80631ccf8d..af9b446ae5 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -21,7 +21,7 @@
 namespace
 {
 
-std::vector<char> CreateRedefineTfLiteModel(
+std::vector<char> CreateReshapeTfLiteModel(
         tflite::BuiltinOperator redefineOperatorCode,
         tflite::TensorType tensorType,
         const std::vector<int32_t>& inputTensorShape,
@@ -141,6 +141,127 @@ std::vector<char> CreateRedefineTfLiteModel(
                                flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
+std::vector<char> CreateRedefineTfLiteModel(
+        tflite::BuiltinOperator redefineOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& inputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        const std::vector<int32_t>& squeezeOrAxisData,
+        float quantScale = 1.0f,
+        int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+    std::vector<int32_t> operatorInputs;
+    std::vector<int32_t> subgraphInputs;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+
+    if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         2,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+        tensors = { inputTensor, outputTensor };
+        operatorInputs = { 0 };
+        subgraphInputs = { 0 };
+        operatorBuiltinOptions =
+                CreateSqueezeOptions(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(squeezeOrAxisData.data(),
+                                                                             squeezeOrAxisData.size())).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        buffers.push_back(
+                CreateBuffer(flatBufferBuilder,
+                             flatBufferBuilder.CreateVector(
+                                     reinterpret_cast<const uint8_t*>(squeezeOrAxisData.data()),
+                                     sizeof(int32_t) * squeezeOrAxisData.size())));
+        auto shapeTensor = CreateTensor(flatBufferBuilder,
+                                        flatBufferBuilder.CreateVector<int32_t>( { 1 } ),
+                                        tflite::TensorType_INT32,
+                                        2,
+                                        flatBufferBuilder.CreateString("axis"));
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         3,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+
+        tensors = { inputTensor, outputTensor, shapeTensor };
+        operatorInputs = { 0, 2 };
+        subgraphInputs = { 0, 2 };
+        operatorBuiltinOptions = CreateExpandDimsOptions(flatBufferBuilder).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_ExpandDimsOptions;
+    }
+
+    const std::vector<int32_t> operatorOutputs{ 1 };
+    flatbuffers::Offset<Operator> redefineOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphOutputs{ 1 };
+    flatbuffers::Offset<SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Redefine Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        redefineOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
 template <typename T>
 void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                   tflite::TensorType tensorType,
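In the helper above, a SQUEEZE model carries its dims inline in SqueezeOptions (two
tensors, input and output, bound to buffers 1 and 2), whereas an EXPAND_DIMS model
passes the axis as a second, constant INT32 input tensor backed by buffer 2, with
the output bound to buffer 3. An illustrative call, using the values from the tests
below (a sketch only; the initializer lists bind to the const std::vector<int32_t>&
parameters):

    std::vector<char> model = CreateRedefineTfLiteModel(tflite::BuiltinOperator_EXPAND_DIMS,
                                                        tflite::TensorType_FLOAT32,
                                                        { 2, 2, 1 },    // input shape
                                                        { 1, 2, 2, 1 }, // output shape
                                                        { 0 });         // axis, stored as a constant tensor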
@@ -149,20 +270,45 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                   std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
-                  std::vector<int32_t>& targetShape,
+                  std::vector<int32_t>& additionalData,
                   bool useOption = true,
                   float quantScale = 1.0f,
                   int quantOffset = 0)
 {
     using namespace delegateTestInterpreter;
-    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
-                                                              tensorType,
-                                                              inputShape,
-                                                              outputShape,
-                                                              targetShape,
-                                                              useOption,
-                                                              quantScale,
-                                                              quantOffset);
+
+    std::vector<char> modelBuffer;
+    if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_RESHAPE)
+    {
+        modelBuffer = CreateReshapeTfLiteModel(redefineOperatorCode,
+                                               tensorType,
+                                               inputShape,
+                                               outputShape,
+                                               additionalData,
+                                               useOption,
+                                               quantScale,
+                                               quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
 
     // Setup interpreter with just TFLite Runtime.
     auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
diff --git a/delegate/test/SqueezeTest.cpp b/delegate/test/SqueezeTest.cpp
new file mode 100644
index 0000000000..01122c95e6
--- /dev/null
+++ b/delegate/test/SqueezeTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+namespace armnnDelegate
+{
+
+void SqueezeSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 2, 2 };
+    std::vector<int32_t> squeezeDims { };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        squeezeDims);
+}
+
+void SqueezeWithDimsTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2 };
+    std::vector<int32_t> squeezeDims { -1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        squeezeDims);
+}
+
+TEST_SUITE("Squeeze_GpuAccTests")
+{
+
+TEST_CASE ("Squeeze_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_GpuAccTests")
+
+TEST_SUITE("Squeeze_CpuAccTests")
+{
+
+TEST_CASE ("Squeeze_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_CpuAccTests")
+
+TEST_SUITE("Squeeze_CpuRefTests")
+{
+
+TEST_CASE ("Squeeze_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
-- 
cgit v1.2.1