author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2020-11-20 14:50:54 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>             2020-11-20 17:38:19 +0000
commit     66da7510362d00c6d5b6e8c1fe7f10145efe764b (patch)
tree       61f9a4e7df7bcf588ce6cd0c955e4410057d3727
parent     0849bfdb137df62fe7e5aa8893faa537b504a0df (diff)
download   armnn-66da7510362d00c6d5b6e8c1fe7f10145efe764b.tar.gz
IVGCVSW-5544 Fix FullyConnected Delegate tests
* Correct input shape

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I9d1fe4c8ef32a9dfba7f7fdd6af314e9a522fce8
-rw-r--r--  delegate/src/FullyConnected.hpp                 40
-rw-r--r--  delegate/src/test/FullyConnectedTest.cpp        99
-rw-r--r--  delegate/src/test/FullyConnectedTestHelper.hpp   2
3 files changed, 87 insertions(+), 54 deletions(-)
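In brief, the fix computes the flattened 2-D input shape before the IsFullyConnectedSupported backend check, rather than only at the point where the reshape layer is added to the network. A minimal, self-contained sketch of that flattening arithmetic, with illustrative values taken from the Int8 test below (the real code operates on armnn::TensorInfo):

    // Flatten an N-D input to 2D [batch_size, input_size], where
    // input_size comes from dimension 1 of the weights shape.
    #include <cstdio>

    int main()
    {
        unsigned int inputSize   = 4;             // weights shape [1, 4]
        unsigned int numElements = 1 * 4 * 2 * 1; // input shape [1, 4, 2, 1]
        if (numElements % inputSize != 0)
        {
            return 1; // the delegate returns kTfLiteError in this case
        }
        unsigned int batchSize = numElements / inputSize;
        std::printf("reshaped to [ %u, %u ]\n", batchSize, inputSize); // [ 2, 4 ]
        return 0;
    }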
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index b79f6a2bb2..53251f7c55 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -129,6 +129,27 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
}
+ armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+ if (inputTensorInfo.GetNumDimensions() > 2)
+ {
+ // Calculate reshape to flatten to 2D [batch_size, input_size]
+ std::vector<unsigned int> reshapedDimensions(2);
+ reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
+ reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+ if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
+ reshapedDimensions[1], operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+ }
+
armnn::FullyConnectedDescriptor descriptor;
descriptor.m_TransposeWeightMatrix = true;
descriptor.m_BiasEnabled = biasEnabled;
@@ -141,7 +162,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
IsFullyConnectedSupported,
delegateData.m_Backends,
isSupported,
- inputTensorInfo,
+ reshapedTensorInfo,
outputTensorInfo,
weightsTensorInfo,
biasTensorInfo,
@@ -184,22 +205,6 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
if (inputTensorInfo.GetNumDimensions() > 2)
{
// Add reshape to flatten to 2D [batch_size, input_size]
- std::vector<unsigned int> reshapedDimensions(2);
- reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
- reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
-
- if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
- {
- TF_LITE_MAYBE_KERNEL_LOG(
- tfLiteContext,
- "TfLiteArmnnDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
- reshapedDimensions[1], operatorCode, nodeIndex);
- return kTfLiteError;
- }
-
- armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
- reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
-
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
@@ -210,7 +215,6 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
// Connect
delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0));
reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
- armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[0]] = &outputSlot;
}
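For reference, the two-argument armnn::TensorShape construction used above takes a dimension count and a pointer to the dimension sizes. A minimal sketch, assuming the Arm NN headers are on the include path; the values are illustrative:

    #include <armnn/Tensor.hpp>
    #include <vector>

    int main()
    {
        // [batch_size, input_size] as computed in the hunk above.
        std::vector<unsigned int> reshapedDimensions = { 2, 4 };
        armnn::TensorShape reshapedShape(2, reshapedDimensions.data());
        // reshapedShape[0] == 2, reshapedShape[1] == 4
        return 0;
    }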
diff --git a/delegate/src/test/FullyConnectedTest.cpp b/delegate/src/test/FullyConnectedTest.cpp
index 1d33381d6e..018f7f5190 100644
--- a/delegate/src/test/FullyConnectedTest.cpp
+++ b/delegate/src/test/FullyConnectedTest.cpp
@@ -8,9 +8,6 @@
namespace
{
-TEST_SUITE("FullyConnectedTest")
-{
-
void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends)
{
std::vector<int32_t> inputTensorShape { 1, 4, 1, 1 };
@@ -61,68 +58,100 @@ void FullyConnectedActicationTest(std::vector<armnn::BackendId>& backends)
weightsData);
}
-void FullyConnectedUint8Test(std::vector<armnn::BackendId>& backends)
+void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends)
{
std::vector<int32_t> inputTensorShape { 1, 4, 2, 1 };
std::vector<int32_t> weightsTensorShape { 1, 4 };
std::vector<int32_t> biasTensorShape { 1 };
std::vector<int32_t> outputTensorShape { 2, 1 };
- std::vector<uint8_t> inputValues = { 1, 2, 3, 4, 10, 20, 30, 40 };
- std::vector<uint8_t> weightsData = { 2, 3, 4, 5 };
+ std::vector<int8_t> inputValues = { 1, 2, 3, 4, 5, 10, 15, 20 };
+ std::vector<int8_t> weightsData = { 2, 3, 4, 5 };
- std::vector<uint8_t> expectedOutputValues = { (40 + 10) / 2, (400 + 10) / 2 };
+ std::vector<int8_t> expectedOutputValues = { 25, 105 }; // (40 + 10) / 2, (200 + 10) / 2
// bias is set std::vector<int32_t> biasData = { 10 } in the model
// input and weights quantization scale 1.0f and offset 0 in the model
// output quantization scale 2.0f and offset 0 in the model
- FullyConnectedTest<uint8_t>(backends,
- ::tflite::TensorType_UINT8,
- tflite::ActivationFunctionType_NONE,
- inputTensorShape,
- weightsTensorShape,
- biasTensorShape,
- outputTensorShape,
- inputValues,
- expectedOutputValues,
- weightsData);
+ FullyConnectedTest<int8_t>(backends,
+ ::tflite::TensorType_INT8,
+ tflite::ActivationFunctionType_NONE,
+ inputTensorShape,
+ weightsTensorShape,
+ biasTensorShape,
+ outputTensorShape,
+ inputValues,
+ expectedOutputValues,
+ weightsData);
}
-TEST_CASE ("FULLY_CONNECTED_FP32_GpuAcc_Test")
+TEST_SUITE("FullyConnected_GpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
FullyConnectedFp32Test(backends);
}
-TEST_CASE ("FULLY_CONNECTED_FP32_CpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ FullyConnectedActicationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_GpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
FullyConnectedFp32Test(backends);
}
-TEST_CASE ("FULLY_CONNECTED_UINT8_GpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- FullyConnectedUint8Test(backends);
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ FullyConnectedActicationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuRefTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ FullyConnectedFp32Test(backends);
}
-TEST_CASE ("FULLY_CONNECTED_UINT8_CpuAcc_Test")
+TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- FullyConnectedUint8Test(backends);
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ FullyConnectedInt8Test(backends);
}
-TEST_CASE ("FULLY_CONNECTED_Activation_GpuAcc_Test")
+TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
FullyConnectedActicationTest(backends);
}
-} // End of TEST_SUITE("FullyConnectedTest")
+} // End of TEST_SUITE("FullyConnected_CpuRefTests")
} // anonymous namespace
\ No newline at end of file
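The expected Int8 outputs above can be checked by hand from the values in the test (bias 10 and output quantization scale 2.0f, as the in-test comment notes). A small stand-alone check, not part of the test suite:

    #include <cstdio>

    int main()
    {
        int weights[4]  = { 2, 3, 4, 5 };
        int input[2][4] = { { 1, 2, 3, 4 }, { 5, 10, 15, 20 } }; // flattened [2, 4]
        for (int row = 0; row < 2; ++row)
        {
            int acc = 10; // bias
            for (int col = 0; col < 4; ++col)
            {
                acc += input[row][col] * weights[col];
            }
            std::printf("row %d -> %d\n", row, acc / 2); // prints 25, then 105
        }
        return 0;
    }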
diff --git a/delegate/src/test/FullyConnectedTestHelper.hpp b/delegate/src/test/FullyConnectedTestHelper.hpp
index 4eed9580f1..4b30424d86 100644
--- a/delegate/src/test/FullyConnectedTestHelper.hpp
+++ b/delegate/src/test/FullyConnectedTestHelper.hpp
@@ -41,7 +41,7 @@ std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
sizeof(T) * weightsData.size()));
auto biasTensorType = ::tflite::TensorType_FLOAT32;
- if (tensorType == ::tflite::TensorType_UINT8)
+ if (tensorType == ::tflite::TensorType_INT8)
{
biasTensorType = ::tflite::TensorType_INT32;
std::vector<int32_t> biasData = { 10 };
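The INT32 bias for INT8 models follows the TFLite quantization scheme, where the bias is stored as int32 with scale equal to inputScale * weightScale and zero point 0. A sketch using this test's scales (both 1.0f, per the comment in the test file):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        float inputScale  = 1.0f;
        float weightScale = 1.0f;
        float biasScale   = inputScale * weightScale; // int32 bias scale
        std::int32_t biasData = 10;                   // as in the model above
        std::printf("dequantized bias = %.1f\n", biasData * biasScale); // 10.0
        return 0;
    }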