From 4cad860254f4249bd13f0fb43dfa04e1c84a8c91 Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Tue, 18 May 2021 09:57:43 +0100
Subject: IVGCVSW-6026 Delegate VisitUnpackOperator not supported error

* Determine Splitter outputs to correctly perform validation
* Add validation for a Reshape Layer
* Add back the Num6 Axis2 test to CpuAcc and GpuAcc

Signed-off-by: Kevin May
Change-Id: I1e8108ce50d81420057d7a8b098a07eda63c5c8d
---
 delegate/src/Unpack.hpp          | 52 +++++++++++++++++++++++++++++----------
 delegate/src/test/UnpackTest.cpp | 24 +++++++++++++++++++
 2 files changed, 62 insertions(+), 14 deletions(-)

(limited to 'delegate')

diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index 87200ff431..4163163243 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -118,9 +118,24 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
     }
 
     const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+    // Determine the shape of the Splitter layer outputs for validation
+    armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+                                                          unpackDimSizes.data());
+
+    std::vector<armnn::TensorInfo> splitterOutputs;
+    for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
+    {
+        splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
+                                                    outputTensorInfos[outputIndex].get().GetDataType(),
+                                                    outputTensorInfos[outputIndex].get().GetQuantizationScale(),
+                                                    outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
+    }
+    std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
+                                                                                     splitterOutputs.end());
+
     if (!delegateData.m_Network)
     {
-        // Check if supported
+        // Check if splitter is supported
         bool isSupported = false;
         FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                    tfLiteContext,
@@ -128,11 +143,30 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
                                    delegateData.m_Backends,
                                    isSupported,
                                    inputTensorInfo,
-                                   outputTensorInfos,
+                                   splitterOutputTensorInfos,
                                    splitDesc);
         return isSupported ? kTfLiteOk : kTfLiteError;
     }
 
+    // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer
+    // Use this descriptor later when creating every ReshapeLayer as all Reshape Layers should be the same
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+
+    if (!delegateData.m_Network)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   splitterOutputTensorInfos[0],
+                                   outputTensorInfos[0],
+                                   reshapeDescriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    };
+
     std::string splitterLayerName("Unpack Splitter");
 
     armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
@@ -147,28 +181,18 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
     // Connect the input slots
     delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(splitterLayer->GetInputSlot(0));
 
-    armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
-                                                          unpackDimSizes.data());
-
     // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
     for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
     {
-        armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
-
         std::string reshapeLayerName("Unpack Reshape");
-        armnn::ReshapeDescriptor reshapeDescriptor;
-        reshapeDescriptor.m_TargetShape = outputTensorInfo.GetShape();
 
         armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                                          reshapeLayerName.c_str());
-
         ARMNN_ASSERT(reshapeLayer != nullptr);
-        splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(armnn::TensorInfo(splitOutShape,
-                                                                                  outputTensorInfo.GetDataType(),
-                                                                                  outputTensorInfo.GetQuantizationScale(),
-                                                                                  outputTensorInfo.GetQuantizationOffset()));
+        splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
         splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
 
+        armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
         reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
         armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
diff --git a/delegate/src/test/UnpackTest.cpp b/delegate/src/test/UnpackTest.cpp
index e9ab6f73b9..c036f649ef 100644
--- a/delegate/src/test/UnpackTest.cpp
+++ b/delegate/src/test/UnpackTest.cpp
@@ -122,6 +122,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
 UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
 }
 
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
 // Uint8
 TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuAcc_Test")
 {
@@ -129,6 +135,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
 UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
 }
 
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
 } // End of Unpack_CpuAccTests
 
 TEST_SUITE("Unpack_GpuAccTests")
 {
@@ -141,6 +153,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
 UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
 }
 
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
 // Uint8
 TEST_CASE ("Unpack_Uint8_Axis0_Num4_GpuAcc_Test")
 {
@@ -148,6 +166,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
 UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
 }
 
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
 } // End of Unpack_GpuAccTests
 
 // End of Unpack Test Suite
-- 
cgit v1.2.1
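
The core of the fix is that the Splitter's intermediate outputs keep the unpacked axis with size 1, while the final Unpack outputs have that axis removed by the Reshape layers; validating IsSplitterSupported against the final output shapes was therefore a mismatch. The standalone sketch below illustrates the shape arithmetic only. It is not ArmNN API code, and the {4, 1, 6} input and axis 2 are assumed example values consistent with an Axis2/Num6 unpack, not values taken from the patch.

// shape_sketch.cpp - minimal standalone sketch (not ArmNN code) of the
// Splitter + Reshape decomposition that VisitUnpackOperator validates.
// Assumed example: a {4, 1, 6} input unpacked along axis 2 into 6 outputs.
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    const std::vector<unsigned int> inputShape = {4, 1, 6};
    const std::size_t axis = 2; // unpacked axis; num = inputShape[axis] = 6

    // Splitter output shape: same rank as the input, unpacked axis set to 1.
    // This mirrors the splitOutShape the patch now builds before validation.
    std::vector<unsigned int> splitOutShape = inputShape;
    splitOutShape[axis] = 1;

    // Reshape target shape: the unpacked axis removed entirely, mirroring
    // reshapeDescriptor.m_TargetShape taken from the first output tensor.
    std::vector<unsigned int> reshapeShape;
    for (std::size_t i = 0; i < inputShape.size(); ++i)
    {
        if (i != axis)
        {
            reshapeShape.push_back(inputShape[i]);
        }
    }

    auto print = [](const char* name, const std::vector<unsigned int>& shape)
    {
        std::cout << name << ": {";
        for (unsigned int d : shape)
        {
            std::cout << ' ' << d;
        }
        std::cout << " }\n";
    };
    print("splitter output", splitOutShape); // prints: splitter output: { 4 1 1 }
    print("reshape output ", reshapeShape);  // prints: reshape output : { 4 1 }
    return 0;
}

Under these assumed values, each of the 6 Splitter outputs is validated as {4, 1, 1} and each Reshape as {4, 1, 1} to {4, 1}, which is presumably why the pre-patch code, passing the final {4, 1} shapes straight to IsSplitterSupported, reported the operator as unsupported.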