From 66da7510362d00c6d5b6e8c1fe7f10145efe764b Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Fri, 20 Nov 2020 14:50:54 +0000
Subject: IVGCVSW-5544 Fix FullyConnected Delegate tests

* Correct input shape

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I9d1fe4c8ef32a9dfba7f7fdd6af314e9a522fce8
---
 delegate/src/FullyConnected.hpp                | 40 ++++++-----
 delegate/src/test/FullyConnectedTest.cpp       | 99 +++++++++++++++++---------
 delegate/src/test/FullyConnectedTestHelper.hpp |  2 +-
 3 files changed, 87 insertions(+), 54 deletions(-)

diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index b79f6a2bb2..53251f7c55 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -129,6 +129,27 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
         biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
     }
 
+    armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+    if (inputTensorInfo.GetNumDimensions() > 2)
+    {
+        // Calculate reshape to flatten to 2D [batch_size, input_size]
+        std::vector<unsigned int> reshapedDimensions(2);
+        reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
+        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
+                reshapedDimensions[1], operatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+    }
+
     armnn::FullyConnectedDescriptor descriptor;
     descriptor.m_TransposeWeightMatrix = true;
     descriptor.m_BiasEnabled = biasEnabled;
@@ -141,7 +162,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
                                IsFullyConnectedSupported,
                                delegateData.m_Backends,
                                isSupported,
-                               inputTensorInfo,
+                               reshapedTensorInfo,
                                outputTensorInfo,
                                weightsTensorInfo,
                                biasTensorInfo,
@@ -184,22 +205,6 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
     if (inputTensorInfo.GetNumDimensions() > 2)
     {
         // Add reshape to flatten to 2D [batch_size, input_size]
-        std::vector<unsigned int> reshapedDimensions(2);
-        reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
-        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
-
-        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
-        {
-            TF_LITE_MAYBE_KERNEL_LOG(
-                tfLiteContext,
-                "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
-                reshapedDimensions[1], operatorCode, nodeIndex);
-            return kTfLiteError;
-        }
-
-        armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
-        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
-
         armnn::ReshapeDescriptor reshapeDescriptor;
         reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
         reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
@@ -210,7 +215,6 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
         // Connect
         delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0));
         reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
-
         armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
         delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[0]] = &outputSlot;
     }
diff --git a/delegate/src/test/FullyConnectedTest.cpp b/delegate/src/test/FullyConnectedTest.cpp
index 1d33381d6e..018f7f5190 100644
--- a/delegate/src/test/FullyConnectedTest.cpp
+++ b/delegate/src/test/FullyConnectedTest.cpp
@@ -8,9 +8,6 @@
 namespace
 {
 
-TEST_SUITE("FullyConnectedTest")
-{
-
 void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends)
 {
     std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
@@ -61,68 +58,100 @@ void FullyConnectedActicationTest(std::vector<armnn::BackendId>& backends)
                         weightsData);
 }
 
-void FullyConnectedUint8Test(std::vector<armnn::BackendId>& backends)
+void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends)
 {
     std::vector<int32_t> inputTensorShape { 1, 4, 2, 1 };
     std::vector<int32_t> weightsTensorShape { 1, 4 };
     std::vector<int32_t> biasTensorShape { 1 };
     std::vector<int32_t> outputTensorShape { 2, 1 };
 
-    std::vector<uint8_t> inputValues = { 1, 2, 3, 4, 10, 20, 30, 40 };
-    std::vector<uint8_t> weightsData = { 2, 3, 4, 5 };
+    std::vector<int8_t> inputValues = { 1, 2, 3, 4, 5, 10, 15, 20 };
+    std::vector<int8_t> weightsData = { 2, 3, 4, 5 };
 
-    std::vector<uint8_t> expectedOutputValues = { (40 + 10) / 2, (400 + 10) / 2 };
+    std::vector<int8_t> expectedOutputValues = { 25, 105 }; // (40 + 10) / 2, (200 + 10) / 2
 
     // bias is set std::vector<int32_t> biasData = { 10 } in the model
     // input and weights quantization scale 1.0f and offset 0 in the model
     // output quantization scale 2.0f and offset 0 in the model
 
-    FullyConnectedTest<uint8_t>(backends,
-                                ::tflite::TensorType_UINT8,
-                                tflite::ActivationFunctionType_NONE,
-                                inputTensorShape,
-                                weightsTensorShape,
-                                biasTensorShape,
-                                outputTensorShape,
-                                inputValues,
-                                expectedOutputValues,
-                                weightsData);
+    FullyConnectedTest<int8_t>(backends,
+                               ::tflite::TensorType_INT8,
+                               tflite::ActivationFunctionType_NONE,
+                               inputTensorShape,
+                               weightsTensorShape,
+                               biasTensorShape,
+                               outputTensorShape,
+                               inputValues,
+                               expectedOutputValues,
+                               weightsData);
 }
 
-TEST_CASE ("FULLY_CONNECTED_FP32_GpuAcc_Test")
+TEST_SUITE("FullyConnected_GpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     FullyConnectedFp32Test(backends);
 }
 
-TEST_CASE ("FULLY_CONNECTED_FP32_CpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FullyConnectedActicationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_GpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     FullyConnectedFp32Test(backends);
 }
 
-TEST_CASE ("FULLY_CONNECTED_UINT8_GpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    FullyConnectedUint8Test(backends);
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FullyConnectedActicationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuRefTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedFp32Test(backends);
 }
 
-TEST_CASE ("FULLY_CONNECTED_UINT8_CpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    FullyConnectedUint8Test(backends);
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedInt8Test(backends);
 }
 
-TEST_CASE ("FULLY_CONNECTED_Activation_GpuAcc_Test")
+TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     FullyConnectedActicationTest(backends);
 }
 
-} // End of TEST_SUITE("FullyConnectedTest")
+} // End of TEST_SUITE("FullyConnected_CpuRefTests")
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/FullyConnectedTestHelper.hpp b/delegate/src/test/FullyConnectedTestHelper.hpp
index 4eed9580f1..4b30424d86 100644
--- a/delegate/src/test/FullyConnectedTestHelper.hpp
+++ b/delegate/src/test/FullyConnectedTestHelper.hpp
@@ -41,7 +41,7 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
                                  sizeof(T) * weightsData.size()));
 
     auto biasTensorType = ::tflite::TensorType_FLOAT32;
-    if (tensorType == ::tflite::TensorType_UINT8)
+    if (tensorType == ::tflite::TensorType_INT8)
     {
         biasTensorType = ::tflite::TensorType_INT32;
        std::vector<int32_t> biasData = { 10 };
-- 
cgit v1.2.1
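
Note on the substance of the change above: the flatten-to-2D shape deduction now runs before the IsFullyConnectedSupported check rather than after it, so backends validate the reshaped [batch_size, input_size] input instead of the raw 4D tensor, and an indivisible shape is rejected before any layer is added to the network. The following is a minimal standalone sketch of that deduction, assuming plain uint32_t dimensions in place of armnn::TensorInfo/armnn::TensorShape; FlattenTo2d and its parameter names are illustrative, not part of the patch or the armnn API.

#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Returns { batch_size, input_size } for a FullyConnected input, or an empty
// vector when the element count is not divisible by the weights' input size
// (the condition that takes the kTfLiteError path in the patch).
std::vector<uint32_t> FlattenTo2d(const std::vector<uint32_t>& inputShape,
                                  uint32_t weightsInputSize)
{
    // Total element count of the N-dimensional input tensor.
    const uint32_t numElements = std::accumulate(inputShape.begin(),
                                                 inputShape.end(),
                                                 1u,
                                                 std::multiplies<uint32_t>());
    if (weightsInputSize == 0 || numElements % weightsInputSize != 0)
    {
        return {}; // shape cannot be deduced from the filter size
    }
    return { numElements / weightsInputSize, weightsInputSize };
}

int main()
{
    // Shapes from FullyConnectedInt8Test: input { 1, 4, 2, 1 }, weights { 1, 4 }.
    // 8 elements with an input size of 4 flatten to [2, 4].
    const auto shape = FlattenTo2d({ 1, 4, 2, 1 }, 4);
    std::cout << shape[0] << " x " << shape[1] << std::endl; // prints: 2 x 4
    return 0;
}

With the test's shapes this yields a [2, 4] input, consistent with outputTensorShape { 2, 1 } (batch 2, one output per row); the divisibility check is exactly the guard the patch hoists above the backend support query.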