author:    Teresa Charlin <teresa.charlinreyes@arm.com>  2023-03-14 12:10:28 +0000
committer: Teresa Charlin <teresa.charlinreyes@arm.com>  2023-03-28 11:41:55 +0100
commit:    ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
tree:      a5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/test/DelegateOptionsTest.cpp
parent:    9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
download:  armnn-ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9.tar.gz
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * test is for tests shared between the existing delegate and the opaque delegate, with test utils to work out which delegate to use
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate as libarmnnOpaqueDelegate.so
* The opaque structure is introduced, but no API is added yet
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and 2 new CMakeLists.txt files have been added
* Renamed BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE
* Renamed BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
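For context, the snippet below is a minimal, hedged sketch (not part of this commit) of how an application typically consumes armnnDelegate::DelegateOptions with the classic delegate built as libarmnnDelegate.so; the tflite::Interpreter instance and the surrounding error handling are assumed.

    // Hedged sketch: attach the classic Arm NN delegate to a TfLite interpreter.
    // Assumes 'interpreter' is an already-built tflite::Interpreter.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnn::OptimizerOptions optimizerOptions;   // defaults: no FP16 reduction, no debug
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    TfLiteDelegate* delegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
    interpreter->ModifyGraphWithDelegate(delegate);
    // ... run inference ...
    // The delegate must outlive the interpreter; call
    // armnnDelegate::TfLiteArmnnDelegateDelete(delegate) only after the interpreter is destroyed.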
Diffstat (limited to 'delegate/test/DelegateOptionsTest.cpp')
-rw-r--r--  delegate/test/DelegateOptionsTest.cpp  372
1 file changed, 372 insertions, 0 deletions
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
new file mode 100644
index 0000000000..ecd8c736e8
--- /dev/null
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -0,0 +1,372 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DelegateOptionsTestHelper.hpp"
+#include <common/include/ProfilingGuid.hpp>
+#include <armnnUtils/Filesystem.hpp>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("DelegateOptions")
+{
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
+{
+ std::stringstream ss;
+ {
+ StreamRedirector redirect(std::cout, ss.rdbuf());
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ // Enable ReduceFp32ToFp16
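+ // Positional flags (assumed order): reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled.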
+ armnn::OptimizerOptions optimizerOptions(true, true, false, false);
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+ }
+ // With ReduceFp32ToFp16 enabled, the optimizer inserts FP32<->FP16 conversion layers,
+ // which should appear in the captured output.
+ CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
+{
+ std::stringstream ss;
+ {
+ StreamRedirector redirect(std::cout, ss.rdbuf());
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ // Enable Debug
+ armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+ }
+ // The Debug option inserts debug layers; their printed output should contain these fields.
+ CHECK(ss.str().find("layerGuid") != std::string::npos);
+ CHECK(ss.str().find("layerName") != std::string::npos);
+ CHECK(ss.str().find("outputSlot") != std::string::npos);
+ CHECK(ss.str().find("shape") != std::string::npos);
+ CHECK(ss.str().find("data") != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ // Enable debug with debug callback function
+ armnn::OptimizerOptions optimizerOptions(false, true, false, false);
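+ // The debug flag (second positional argument) must be enabled for the callback below to be invoked.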
+ bool callback = false;
+ auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
+ {
+ armnn::IgnoreUnused(guid);
+ armnn::IgnoreUnused(slotIndex);
+ armnn::IgnoreUnused(tensor);
+ callback = true;
+ };
+
+ armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
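+ // Note: networkProperties is constructed here but is not passed to the delegate options below.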
+ armnnDelegate::DelegateOptions delegateOptions(backends,
+ optimizerOptions,
+ armnn::EmptyOptional(),
+ armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
+
+ CHECK(!callback);
+
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+
+ // Check that the debug callback function was called.
+ CHECK(callback);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
+ std::vector<uint8_t> divData = { 2, 2, 3, 4 };
+ std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
+
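+ // The fourth positional flag (importEnabled) is what this test exercises.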
+ armnn::OptimizerOptions optimizerOptions(false, false, false, true);
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+ DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
+{
+ std::stringstream stringStream;
+ std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
+ std::vector<std::string> values { "CpuRef", "1", "1"};
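+ // With "disable-tflite-runtime-fallback" set to "1", unsupported operators trigger the
+ // warning checked for below rather than a silent fallback to the TfLite runtime.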
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
+ std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
+
+ // Build the options_keys and options_values char* arrays expected by the
+ // string-parsing DelegateOptions constructor.
+ size_t num_options = keys.size();
+ // Use the array form of unique_ptr so delete[] (not delete) is called on destruction.
+ std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+ for (size_t i = 0; i < num_options; ++i)
+ {
+ options_keys[i] = keys[i].c_str();
+ options_values[i] = values[i].c_str();
+ }
+
+ StreamRedirector redirect(std::cout, stringStream.rdbuf());
+
+ armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+ DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ expectedResult,
+ delegateOptions);
+ CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
+ != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
+{
+ std::stringstream stringStream;
+ std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
+ std::vector<std::string> values { "CpuRef", "1", "0"};
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
+ std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
+
+ // Build the key/value char* arrays (array-form unique_ptr ensures delete[] is used).
+ size_t num_options = keys.size();
+ std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+ for (size_t i = 0; i < num_options; ++i)
+ {
+ options_keys[i] = keys[i].c_str();
+ options_values[i] = values[i].c_str();
+ }
+
+ StreamRedirector redirect(std::cout, stringStream.rdbuf());
+
+ armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+ DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ expectedResult,
+ delegateOptions);
+
+ CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
+ == std::string::npos);
+}
+
+} // TEST_SUITE("DelegateOptions")
+
+TEST_SUITE("DelegateOptions_CpuAccTests")
+{
+
+TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ unsigned int numberOfThreads = 2;
+
+ armnn::ModelOptions modelOptions;
+ armnn::BackendOptions cpuAcc("CpuAcc",
+ {
+ { "FastMathEnabled", true },
+ { "NumberOfThreads", numberOfThreads }
+ });
+ modelOptions.push_back(cpuAcc);
+
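+ // Assumed positional mapping: reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled, modelOptions, exportEnabled.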
+ armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateSerializeToDot")
+{
+ const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
+ if ( fs::exists(filename) )
+ {
+ fs::remove(filename);
+ }
+ std::stringstream ss;
+ {
+ StreamRedirector redirect(std::cout, ss.rdbuf());
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ armnn::OptimizerOptions optimizerOptions(false, false, false, false);
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+ // Enable serialize to dot by specifying the target file name.
+ delegateOptions.SetSerializeToDot(filename);
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+ }
+ CHECK(fs::exists(filename));
+ // The file should have a size greater than 0 bytes.
+ CHECK(fs::file_size(filename) > 0);
+ // Clean up.
+ fs::remove(filename);
+}
+
+void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
+ std::vector<std::string>& values,
+ std::stringstream& ss)
+{
+ StreamRedirector redirect(std::cout, ss.rdbuf());
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+ std::vector<float> inputData = { 1, 2, 3, 4 };
+ std::vector<float> divData = { 2, 2, 3, 4 };
+ std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+ // Build the key/value char* arrays (array-form unique_ptr ensures delete[] is used).
+ size_t num_options = keys.size();
+ std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+ std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+ for (size_t i = 0; i < num_options; ++i)
+ {
+ options_keys[i] = keys[i].c_str();
+ options_values[i] = values[i].c_str();
+ }
+
+ armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+ DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+ backends,
+ tensorShape,
+ inputData,
+ inputData,
+ divData,
+ expectedResult,
+ delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
+{
+ SUBCASE("Fp16=1")
+ {
+ std::stringstream ss;
+ std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
+ std::vector<std::string> values { "CpuRef", "1", "1", "info"};
+ CreateFp16StringParsingTestRun(keys, values, ss);
+ CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+ }
+ SUBCASE("Fp16=true")
+ {
+ std::stringstream ss;
+ std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
+ std::vector<std::string> values { "CpuRef", "TRUE", "true"};
+ CreateFp16StringParsingTestRun(keys, values, ss);
+ CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+ }
+ SUBCASE("Fp16=True")
+ {
+ std::stringstream ss;
+ std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
+ std::vector<std::string> values { "CpuRef", "true", "True"};
+ CreateFp16StringParsingTestRun(keys, values, ss);
+ CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+ }
+ SUBCASE("Fp16=0")
+ {
+ std::stringstream ss;
+ std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
+ std::vector<std::string> values { "CpuRef", "true", "0"};
+ CreateFp16StringParsingTestRun(keys, values, ss);
+ CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
+ }
+ SUBCASE("Fp16=false")
+ {
+ std::stringstream ss;
+ std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
+ std::vector<std::string> values { "CpuRef", "1", "false"};
+ CreateFp16StringParsingTestRun(keys, values, ss);
+ CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
+ CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
+ }
+}
+
+} // TEST_SUITE("DelegateOptions_CpuAccTests")
+
+} // namespace armnnDelegate