aboutsummaryrefslogtreecommitdiff
path: root/delegate/test/FullyConnectedTest.cpp
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2023-03-14 12:10:28 +0000
committerTeresa Charlin <teresa.charlinreyes@arm.com>2023-03-28 11:41:55 +0100
commitad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
treea5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/test/FullyConnectedTest.cpp
parent9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
downloadarmnn-ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9.tar.gz
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used.
  * classic is for the existing delegate implementation.
  * opaque is for the new opaque delegate implementation.
  * test is for tests shared between the existing delegate and the opaque delegate, with test utilities to select which delegate to exercise.
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate is built as libarmnnOpaqueDelegate.so.
* An Opaque structure is introduced, but no API is added yet.
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and 2 new CMakeLists.txt files have been added.
* Renamed BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE.
* Renamed BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
Diffstat (limited to 'delegate/test/FullyConnectedTest.cpp')
-rw-r--r--delegate/test/FullyConnectedTest.cpp178
1 files changed, 178 insertions, 0 deletions
diff --git a/delegate/test/FullyConnectedTest.cpp b/delegate/test/FullyConnectedTest.cpp
new file mode 100644
index 0000000000..3ef5cedbd7
--- /dev/null
+++ b/delegate/test/FullyConnectedTest.cpp
@@ -0,0 +1,178 @@
+//
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FullyConnectedTestHelper.hpp"
+
+namespace
+{
+
+// Runs a float32 FullyConnected layer (no activation) through the delegate
+// test helper: one batch of four inputs reduced to a single output neuron.
+void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    // Tensor shapes: NHWC-style 4D input, [outputs, inputs] weights,
+    // per-output bias, and a [batch, outputs] result.
+    std::vector<int32_t> inShape      { 1, 4, 1, 1 };
+    std::vector<int32_t> weightsShape { 1, 4 };
+    std::vector<int32_t> biasShape    { 1 };
+    std::vector<int32_t> outShape     { 1, 1 };
+
+    std::vector<float> inValues { 10, 20, 30, 40 };
+    std::vector<float> weights  { 2, 3, 4, 5 };
+
+    // Dot product 10*2 + 20*3 + 30*4 + 40*5 = 400, plus the bias of 10
+    // that the helper bakes into the model (biasData = { 10 }).
+    std::vector<float> expected { 400 + 10 };
+
+    FullyConnectedTest<float>(backends,
+                              ::tflite::TensorType_FLOAT32,
+                              tflite::ActivationFunctionType_NONE,
+                              inShape,
+                              weightsShape,
+                              biasShape,
+                              outShape,
+                              inValues,
+                              expected,
+                              weights,
+                              constantWeights);
+}
+
+// Runs a float32 FullyConnected layer with a fused ReLU activation:
+// the raw result is negative, so ReLU clamps the output to zero.
+void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    std::vector<int32_t> inShape      { 1, 4, 1, 1 };
+    std::vector<int32_t> weightsShape { 1, 4 };
+    std::vector<int32_t> biasShape    { 1 };
+    std::vector<int32_t> outShape     { 1, 1 };
+
+    std::vector<float> inValues { -10, 20, 30, 40 };
+    std::vector<float> weights  { 2, 3, 4, -5 };
+
+    // -10*2 + 20*3 + 30*4 + 40*(-5) = -40, plus the bias of 10 the helper
+    // bakes into the model (biasData = { 10 }) gives -30; ReLU -> 0.
+    std::vector<float> expected { 0 };
+
+    FullyConnectedTest<float>(backends,
+                              ::tflite::TensorType_FLOAT32,
+                              tflite::ActivationFunctionType_RELU,
+                              inShape,
+                              weightsShape,
+                              biasShape,
+                              outShape,
+                              inValues,
+                              expected,
+                              weights,
+                              constantWeights);
+}
+
+// Runs a quantized int8 FullyConnected layer (no activation) over two
+// batches of four inputs, each producing one output value.
+void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    std::vector<int32_t> inShape      { 1, 4, 2, 1 };
+    std::vector<int32_t> weightsShape { 1, 4 };
+    std::vector<int32_t> biasShape    { 1 };
+    std::vector<int32_t> outShape     { 2, 1 };
+
+    std::vector<int8_t> inValues { 1, 2, 3, 4, 5, 10, 15, 20 };
+    std::vector<int8_t> weights  { 2, 3, 4, 5 };
+
+    // Per the helper's model: bias is int32 { 10 }; input/weights use
+    // quantization scale 1.0f, offset 0; output uses scale 2.0f, offset 0.
+    // Batch 1: (40 + 10) / 2 = 25.  Batch 2: (200 + 10) / 2 = 105.
+    std::vector<int8_t> expected { 25, 105 };
+
+    FullyConnectedTest<int8_t>(backends,
+                               ::tflite::TensorType_INT8,
+                               tflite::ActivationFunctionType_NONE,
+                               inShape,
+                               weightsShape,
+                               biasShape,
+                               outShape,
+                               inValues,
+                               expected,
+                               weights,
+                               constantWeights);
+}
+
+// FullyConnected coverage on the GPU (OpenCL) backend.
+TEST_SUITE("FullyConnected_GpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> gpuBackends = { armnn::Compute::GpuAcc };
+    FullyConnectedFp32Test(gpuBackends);
+}
+
+TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> gpuBackends = { armnn::Compute::GpuAcc };
+    FullyConnectedInt8Test(gpuBackends);
+}
+
+TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> gpuBackends = { armnn::Compute::GpuAcc };
+    FullyConnectedActivationTest(gpuBackends);
+}
+
+} // End of TEST_SUITE("FullyConnected_GpuAccTests")
+
+// FullyConnected coverage on the Neon (CPU-accelerated) backend.
+TEST_SUITE("FullyConnected_CpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> neonBackends = { armnn::Compute::CpuAcc };
+    FullyConnectedFp32Test(neonBackends);
+}
+
+TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> neonBackends = { armnn::Compute::CpuAcc };
+    FullyConnectedInt8Test(neonBackends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> neonBackends = { armnn::Compute::CpuAcc };
+    FullyConnectedActivationTest(neonBackends);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuAccTests")
+
+// FullyConnected coverage on the reference backend, including the
+// non-constant (weights supplied as inputs) variants that only CpuRef runs.
+TEST_SUITE("FullyConnected_CpuRefTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedFp32Test(refBackends);
+}
+
+TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedInt8Test(refBackends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedActivationTest(refBackends);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedFp32Test(refBackends, false);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedInt8Test(refBackends, false);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> refBackends = { armnn::Compute::CpuRef };
+    FullyConnectedActivationTest(refBackends, false);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuRefTests")
+
+} // anonymous namespace \ No newline at end of file