Diffstat (limited to 'include/armnn/INetwork.hpp')
-rw-r--r--  include/armnn/INetwork.hpp  27
1 file changed, 27 insertions, 0 deletions
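For context, a minimal usage sketch of the ToString() helper this patch introduces, assuming its output is emitted through the ARMNN_LOG macro from the newly included armnn/Logging.hpp; the ConfigureLogging setup and the option values below are illustrative, not part of this patch:

    // Sketch: log the optimizer options before optimizing a network (illustrative only).
    #include <armnn/INetwork.hpp>
    #include <armnn/Logging.hpp>
    #include <armnn/Utils.hpp>   // assumed location of ConfigureLogging()

    int main()
    {
        // Send ArmNN log output to stdout at Info severity.
        armnn::ConfigureLogging(true, false, armnn::LogSeverity::Info);

        armnn::OptimizerOptions options;
        options.m_ReduceFp32ToFp16 = true;

        // The new ToString() renders the optimizer options (and any backend ModelOptions) as text.
        ARMNN_LOG(info) << options.ToString();
        return 0;
    }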
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index a4b37f37eb..6119f124e1 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -12,6 +12,7 @@
#include <armnn/NetworkFwd.hpp>
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
+#include <armnn/Logging.hpp>
#include <memory>
#include <vector>
@@ -162,6 +163,32 @@ struct OptimizerOptions
}
}
+ const std::string ToString() const
+ {
+ std::stringstream stream;
+ stream << "OptimizerOptions: \n";
+ stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
+ stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
+ stream << "\tDebug: " <<
+ (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
+ stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
+ stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
+
+ stream << "\tModelOptions: \n";
+ for (auto optionsGroup : m_ModelOptions)
+ {
+ for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
+ {
+ const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
+ stream << "\t\tBackend: " << optionsGroup.GetBackendId()
+ << "\t\t\tOption: " << option.GetName()
+ << "\t\t\tValue: " << std::string(option.GetValue().ToString());
+ }
+ }
+
+ return stream.str();
+ }
+
/// Reduces all Fp32 operators in the model to Fp16 for faster processing.
/// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
/// between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16.