aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKevin May <kevin.may@arm.com>2021-05-18 09:57:43 +0100
committerKevin May <kevin.may@arm.com>2021-05-18 12:42:30 +0000
commit4cad860254f4249bd13f0fb43dfa04e1c84a8c91 (patch)
treeaf894ec5a294e319b1d34b6054bac6dbf17409c3
parent25ab3a8326a9e2c52c84b2747fa72907109a695d (diff)
downloadarmnn-4cad860254f4249bd13f0fb43dfa04e1c84a8c91.tar.gz
IVGCVSW-6026 Delegate VisitUnpackOperator not supported error
* Determine Splitter outputs to correctly perform validation
* Add validation for a Reshape Layer
* Add back the Num6 Axis2 test to CpuAcc and GpuAcc

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I1e8108ce50d81420057d7a8b098a07eda63c5c8d
-rw-r--r--  delegate/src/Unpack.hpp           52
-rw-r--r--  delegate/src/test/UnpackTest.cpp  24
2 files changed, 62 insertions(+), 14 deletions(-)
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index 87200ff431..4163163243 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -118,9 +118,24 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
}
const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+ // Determine the shape of the Splitter layer outputs for validation
+ armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+ unpackDimSizes.data());
+
+ std::vector<armnn::TensorInfo> splitterOutputs;
+ for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
+ {
+ splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
+ outputTensorInfos[outputIndex].get().GetDataType(),
+ outputTensorInfos[outputIndex].get().GetQuantizationScale(),
+ outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
+ }
+ std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
+ splitterOutputs.end());
+
if (!delegateData.m_Network)
{
- // Check if supported
+ // Check if splitter is supported
bool isSupported = false;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
tfLiteContext,
@@ -128,11 +143,30 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
delegateData.m_Backends,
isSupported,
inputTensorInfo,
- outputTensorInfos,
+ splitterOutputTensorInfos,
splitDesc);
return isSupported ? kTfLiteOk : kTfLiteError;
}
+ // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer
+ // Use this descriptor later when creating every ReshapeLayer as all Reshape Layers should be the same
+ armnn::ReshapeDescriptor reshapeDescriptor;
+ reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+
+ if (!delegateData.m_Network)
+ {
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsReshapeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ splitterOutputTensorInfos[0],
+ outputTensorInfos[0],
+ reshapeDescriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ };
+
std::string splitterLayerName("Unpack Splitter");
armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
@@ -147,28 +181,18 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
// Connect the input slots
delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(splitterLayer->GetInputSlot(0));
- armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
- unpackDimSizes.data());
-
// Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
{
- armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
-
std::string reshapeLayerName("Unpack Reshape");
- armnn::ReshapeDescriptor reshapeDescriptor;
- reshapeDescriptor.m_TargetShape = outputTensorInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
reshapeLayerName.c_str());
-
ARMNN_ASSERT(reshapeLayer != nullptr);
- splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(armnn::TensorInfo(splitOutShape,
- outputTensorInfo.GetDataType(),
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset()));
+ splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
+ armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
diff --git a/delegate/src/test/UnpackTest.cpp b/delegate/src/test/UnpackTest.cpp
index e9ab6f73b9..c036f649ef 100644
--- a/delegate/src/test/UnpackTest.cpp
+++ b/delegate/src/test/UnpackTest.cpp
@@ -122,6 +122,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
}
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
// Uint8
TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuAcc_Test")
{
@@ -129,6 +135,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
}
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
} // End of Unpack_CpuAccTests
TEST_SUITE("Unpack_GpuAccTests")
@@ -141,6 +153,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
}
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
// Uint8
TEST_CASE ("Unpack_Uint8_Axis0_Num4_GpuAcc_Test")
{
@@ -148,6 +166,12 @@ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
}
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
} // End of Unpack_GpuAccTests
// End of Unpack Test Suite