//
// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"

#include <common/include/ProfilingGuid.hpp> // LayerGuid
#include <armnnUtils/Filesystem.hpp>        // fs namespace alias
#include <doctest/doctest.h>

namespace armnnDelegate
{

TEST_SUITE("DelegateOptions")
{

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable ReduceFp32ToFp16
        armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // ReduceFp32ToFp16 option is enabled
    CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
    CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable Debug
        armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // Debug option triggered.
    CHECK(ss.str().find("layerGuid") != std::string::npos);
    CHECK(ss.str().find("layerName") != std::string::npos);
    CHECK(ss.str().find("outputSlot") != std::string::npos);
    CHECK(ss.str().find("shape") != std::string::npos);
    CHECK(ss.str().find("data") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Enable debug with debug callback function
    armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
    bool callback = false;
    auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
    {
        armnn::IgnoreUnused(guid);
        armnn::IgnoreUnused(slotIndex);
        armnn::IgnoreUnused(tensor);
        callback = true;
    };

    armnn::INetworkProperties networkProperties(false,
                                                armnn::MemorySource::Undefined,
                                                armnn::MemorySource::Undefined);
    armnnDelegate::DelegateOptions delegateOptions(backends,
                                                   optimizerOptions,
                                                   armnn::EmptyOptional(),
                                                   armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));

    CHECK(!callback);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);

    // Check that the debug callback function was called.
    CHECK(callback);
}
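
// Note on the positional OptimizerOptionsOpaque constructor used throughout this file:
// assuming the current armnn API, the four booleans are (reduceFp32ToFp16, debug,
// reduceFp32ToBf16, importEnabled), so the case below enables memory import only.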
TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
    std::vector<uint8_t> divData = { 2, 2, 3, 4 };
    std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };

    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, true);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
                                tensorShape,
                                inputData,
                                inputData,
                                divData,
                                expectedResult,
                                delegateOptions);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
{
    std::stringstream stringStream;
    std::vector<std::string> keys   { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "1" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };

    // Create options_keys and options_values char array
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys   = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i]   = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);

    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          != std::string::npos);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
{
    std::stringstream stringStream;
    std::vector<std::string> keys   { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "0" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 0.995004177f, -0.504846036f, -0.989992499f, -0.112152621f };

    // Create options_keys and options_values char array
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys   = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i]   = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);

    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          == std::string::npos);
}

} // End of TEST_SUITE("DelegateOptions")

TEST_SUITE("DelegateOptions_CpuAccTests")
{

TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };
    unsigned int numberOfThreads = 2;

    armnn::ModelOptions modelOptions;
    armnn::BackendOptions cpuAcc("CpuAcc",
                                 {
                                     { "FastMathEnabled", true },
                                     { "NumberOfThreads", numberOfThreads }
                                 });
    modelOptions.push_back(cpuAcc);

    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, false, modelOptions, false);
    std::vector<armnn::BackendId> availableBackends = CaptureAvailableBackends(backends);
    // It's possible that CpuAcc isn't supported. In that case availableBackends will be empty.
    if (availableBackends.empty())
    {
        return;
    }
    armnnDelegate::DelegateOptions delegateOptions(availableBackends, optimizerOptions);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}
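
// The next case exercises DelegateOptions::SetSerializeToDot, which should make the delegate
// write the optimized graph to the given .dot file; the test only asserts that a non-empty
// file appears.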
TEST_CASE ("ArmnnDelegateSerializeToDot")
{
    const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
    if ( fs::exists(filename) )
    {
        fs::remove(filename);
    }
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
        // Enable serialize to dot by specifying the target file name.
        delegateOptions.SetSerializeToDot(filename);
        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    CHECK(fs::exists(filename));
    // The file should have a size greater than 0 bytes.
    CHECK(fs::file_size(filename) > 0);
    // Clean up.
    fs::remove(filename);
}

void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
                                    std::vector<std::string>& values,
                                    std::stringstream& ss)
{
    StreamRedirector redirect(std::cout, ss.rdbuf());

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Create options_keys and options_values char array
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys   = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i]   = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
{
    SUBCASE("Fp16=1")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity" };
        std::vector<std::string> values { "CpuRef", "1", "1", "info" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=true")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "TRUE", "true" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=True")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "True" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=0")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "0" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
    }
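    // Taken together, these subcases indicate that boolean option values are parsed
    // case-insensitively: "1"/"true"/"TRUE"/"True" enable the option, "0"/"false" disable it.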
{ "backends", "debug-data", "reduce-fp32-to-fp16"}; std::vector values { "CpuRef", "1", "false"}; CreateFp16StringParsingTestRun(keys, values, ss); CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos); CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos); } } } } // namespace armnnDelegate