From 9214d81ad52a80e69618bea09870f1afff446d60 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Fri, 21 Aug 2020 10:03:49 +0100
Subject: IVGCVSW-5200 Add import enabled optimizer options to PyArmNN

Signed-off-by: Narumol Prangnawarat
Change-Id: Ic7c288fd829d7f1f1ae2910c47fbccdd635def8f
---
 include/armnn/INetwork.hpp                              | 13 +++++++++++++
 python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i |  5 ++++-
 python/pyarmnn/test/test_network.py                     | 10 ++++++++++
 3 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 5e8a6f2476..1d4939e03d 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -616,6 +616,19 @@ struct OptimizerOptions
         , m_ImportEnabled(false)
     {}
 
+    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled)
+        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+        , m_Debug(debug)
+        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+        , m_ImportEnabled(importEnabled)
+    {
+        if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
+        {
+            throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
+        }
+    }
+
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false)
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 4665e6087e..57bf355253 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -24,6 +24,7 @@ Contains:
                                that can not be reduced will be left in Fp32.
     m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
                                that can not be reduced will be left in Fp32.
+    m_ImportEnabled (bool): Enable memory import.
 ") OptimizerOptions;
 
 struct OptimizerOptions
@@ -32,11 +33,13 @@ struct OptimizerOptions
 
     OptimizerOptions(bool reduceFp32ToFp16,
                      bool debug,
-                     bool reduceFp32ToBf16 = false);
+                     bool reduceFp32ToBf16 = false,
+                     bool importEnabled = false);
 
     bool m_ReduceFp32ToBf16;
     bool m_ReduceFp32ToFp16;
     bool m_Debug;
+    bool m_ImportEnabled;
 };
 
 %feature("docstring",
diff --git a/python/pyarmnn/test/test_network.py b/python/pyarmnn/test/test_network.py
index 679e640374..c24b113cdb 100644
--- a/python/pyarmnn/test/test_network.py
+++ b/python/pyarmnn/test/test_network.py
@@ -11,18 +11,28 @@ def test_optimizer_options_default_values():
     assert opt.m_ReduceFp32ToFp16 == False
     assert opt.m_Debug == False
     assert opt.m_ReduceFp32ToBf16 == False
+    assert opt.m_ImportEnabled == False
 
 def test_optimizer_options_set_values1():
     opt = ann.OptimizerOptions(True, True)
     assert opt.m_ReduceFp32ToFp16 == True
     assert opt.m_Debug == True
     assert opt.m_ReduceFp32ToBf16 == False
+    assert opt.m_ImportEnabled == False
 
 def test_optimizer_options_set_values2():
     opt = ann.OptimizerOptions(False, False, True)
     assert opt.m_ReduceFp32ToFp16 == False
     assert opt.m_Debug == False
     assert opt.m_ReduceFp32ToBf16 == True
+    assert opt.m_ImportEnabled == False
+
+def test_optimizer_options_set_values3():
+    opt = ann.OptimizerOptions(False, False, True, True)
+    assert opt.m_ReduceFp32ToFp16 == False
+    assert opt.m_Debug == False
+    assert opt.m_ReduceFp32ToBf16 == True
+    assert opt.m_ImportEnabled == True
 
 @pytest.fixture(scope="function")
 def get_runtime(shared_data_folder, network_file):
--
cgit v1.2.1
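
The sketch below is not part of the patch; it is a minimal illustration of how the new fourth constructor argument could be used from PyArmNN once this change is built, assuming a pyarmnn installation that includes it and a backend that supports imported memory. The commented `ann.Optimize` call and the `network`/`runtime` names are placeholders for the usual optimization flow, not code introduced by this commit.

```python
import pyarmnn as ann

# reduceFp32ToFp16=False, debug=False, reduceFp32ToBf16=False, importEnabled=True
opt_options = ann.OptimizerOptions(False, False, False, True)
print(opt_options.m_ImportEnabled)  # True

# The options are then passed to the optimizer in the usual way, e.g.:
# preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
# opt_network, messages = ann.Optimize(network, preferred_backends,
#                                      runtime.GetDeviceSpec(), opt_options)
```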