diff options
author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2021-01-20 15:58:29 +0000 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2021-01-25 17:56:59 +0000 |
commit | 0b51d5ad533f8ecde71f957077690195eea29ffc (patch) | |
tree | d04aaecd63deb8c67f4cea001bc4ddac3181911c /delegate/src/test/DelegateOptionsTest.cpp | |
parent | e5617954db782628ca49919a627d01ee0088fb67 (diff) | |
download | armnn-0b51d5ad533f8ecde71f957077690195eea29ffc.tar.gz |
IVGCVSW-5619 Add OptimizerOptions and NetworkProperties to ArmNN Delegate
* Add OptimizerOptions, NetworkProperties, DebugCallbackFunction
to DelegateOptions
* Enable OptimizerOptions when the network is being optimized
* Enable NetworkProperties when loading network
* Enable DebugCallbackFunction
* Add error message when loading network
* Log a warning instead of an error when an operator is not supported but
could fall back to another backend
* Improve uint16_t CompareData
* Unit tests
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I353035afb442774bfeb1c62570a90755c2ceaf38
Diffstat (limited to 'delegate/src/test/DelegateOptionsTest.cpp')
-rw-r--r-- | delegate/src/test/DelegateOptionsTest.cpp | 157 |
1 file changed, 157 insertions, 0 deletions
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp new file mode 100644 index 0000000000..c623781301 --- /dev/null +++ b/delegate/src/test/DelegateOptionsTest.cpp @@ -0,0 +1,157 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "DelegateOptionsTestHelper.hpp" + +namespace armnnDelegate +{ + +TEST_SUITE("DelegateOptions") +{ + +TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16") +{ + std::stringstream ss; + { + StreamRedirector redirect(std::cout, ss.rdbuf()); + + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + std::vector<int32_t> tensorShape { 1, 2, 2, 1 }; + std::vector<float> inputData = { 1, 2, 3, 4 }; + std::vector<float> divData = { 2, 2, 3, 4 }; + std::vector<float> expectedResult = { 1, 2, 2, 2 }; + + // Enable ReduceFp32ToFp16 + armnn::OptimizerOptions optimizerOptions(true, true, false, false); + armnn::INetworkProperties networkProperties; + armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties); + + DelegateOptionTest<float>(::tflite::TensorType_FLOAT32, + backends, + tensorShape, + inputData, + inputData, + divData, + expectedResult, + delegateOptions); + } + // ReduceFp32ToFp16 option is enabled + CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos); + CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos); +} + +TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug") +{ + std::stringstream ss; + { + StreamRedirector redirect(std::cout, ss.rdbuf()); + + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + std::vector<int32_t> tensorShape { 1, 2, 2, 1 }; + std::vector<float> inputData = { 1, 2, 3, 4 }; + std::vector<float> divData = { 2, 2, 3, 4 }; + std::vector<float> expectedResult = { 1, 2, 2, 2 }; + + // Enable Debug + armnn::OptimizerOptions optimizerOptions(false, true, false, false); + armnn::INetworkProperties 
networkProperties; + armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties); + + DelegateOptionTest<float>(::tflite::TensorType_FLOAT32, + backends, + tensorShape, + inputData, + inputData, + divData, + expectedResult, + delegateOptions); + } + // Debug option triggered. + CHECK(ss.str().find("layerGuid") != std::string::npos); + CHECK(ss.str().find("layerName") != std::string::npos); + CHECK(ss.str().find("outputSlot") != std::string::npos); + CHECK(ss.str().find("shape") != std::string::npos); + CHECK(ss.str().find("data") != std::string::npos); +} + +TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + std::vector<int32_t> tensorShape { 1, 2, 2, 1 }; + std::vector<float> inputData = { 1, 2, 3, 4 }; + std::vector<float> divData = { 2, 2, 3, 4 }; + std::vector<float> expectedResult = { 1, 2, 2, 2 }; + + // Enable debug with debug callback function + armnn::OptimizerOptions optimizerOptions(false, true, false, false); + bool callback = false; + auto mockCallback = [&](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor) + { + armnn::IgnoreUnused(guid); + armnn::IgnoreUnused(slotIndex); + armnn::IgnoreUnused(tensor); + callback = true; + }; + + armnn::INetworkProperties networkProperties; + armnnDelegate::DelegateOptions delegateOptions(backends, + optimizerOptions, + networkProperties, + armnn::EmptyOptional(), + armnn::Optional<armnn::DebugCallbackFunction>(mockCallback)); + + CHECK(!callback); + + DelegateOptionTest<float>(::tflite::TensorType_FLOAT32, + backends, + tensorShape, + inputData, + inputData, + divData, + expectedResult, + delegateOptions); + + // Check that the debug callback function was called. 
+ CHECK(callback); +} + +TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToBf16") +{ + std::stringstream ss; + { + StreamRedirector redirect(std::cout, ss.rdbuf()); + + ReduceFp32ToBf16TestImpl(); + } + + // ReduceFp32ToBf16 option is enabled + CHECK(ss.str().find("convert_fp32_to_bf16") != std::string::npos); +} + +TEST_CASE ("ArmnnDelegateOptimizerOptionsImport") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef }; + std::vector<int32_t> tensorShape { 1, 2, 2, 1 }; + std::vector<uint8_t> inputData = { 1, 2, 3, 4 }; + std::vector<uint8_t> divData = { 2, 2, 3, 4 }; + std::vector<uint8_t> expectedResult = { 1, 2, 2, 2}; + + armnn::OptimizerOptions optimizerOptions(false, false, false, true); + armnn::INetworkProperties networkProperties(true, true); + armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions, networkProperties); + + DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8, + backends, + tensorShape, + inputData, + inputData, + divData, + expectedResult, + delegateOptions); +} + +} + +} // namespace armnnDelegate |