Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ArmNNExecutor.cpp                 1
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.cpp          6
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp  6
-rw-r--r--  tests/InferenceModel.hpp                               1
4 files changed, 5 insertions, 9 deletions
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index e8b501489e..943d3aad07 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -513,7 +513,6 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = m_Params.m_EnableFp16TurboMode;
- options.m_ReduceFp32ToBf16 = m_Params.m_EnableBf16TurboMode;
options.m_Debug = m_Params.m_PrintIntermediate;
options.m_DebugToFile = m_Params.m_PrintIntermediateOutputsToFile;
options.m_shapeInferenceMethod = m_Params.m_InferOutputShape ?
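Note: the hunk above (and the matching removals in ExecuteNetworkParams.cpp and InferenceModel.hpp below) drops the m_ReduceFp32ToBf16 optimizer flag; BFloat16 execution is now expected to come from backend fast-math support instead. A minimal sketch of what that looks like on the optimizer-options side, assuming the commonly documented "CpuAcc"/"GpuAcc" backend IDs and the "FastMathEnabled" backend option (neither appears in this diff):

// Sketch only: route a fast-math request through backend model options
// instead of the removed m_ReduceFp32ToBf16 flag. Backend IDs and the
// "FastMathEnabled" option name are assumptions, not taken from this diff.
#include <armnn/BackendOptions.hpp>
#include <armnn/INetwork.hpp>

void ConfigureFastMath(armnn::OptimizerOptions& options, bool enableFastMath)
{
    armnn::BackendOptions cpuAcc("CpuAcc", {{ "FastMathEnabled", enableFastMath }});
    armnn::BackendOptions gpuAcc("GpuAcc", {{ "FastMathEnabled", enableFastMath }});
    options.m_ModelOptions.push_back(cpuAcc);
    options.m_ModelOptions.push_back(gpuAcc);
}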
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 155a4c4a8b..fa467c93f8 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -60,10 +60,9 @@ void ExecuteNetworkParams::ValidateParams()
}
CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
- if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
+ if (m_EnableBf16TurboMode && !m_EnableFastMath)
{
- throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
- "enabled at the same time.");
+ throw armnn::InvalidArgumentException("To use BF16 please also set --enable-fast-math.");
}
// Check input tensor shapes
@@ -124,7 +123,6 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
- options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
options.m_Debug = m_PrintIntermediate;
options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
options.m_ProfilingEnabled = m_EnableProfiling;
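Note: the ValidateParams() change above turns bf16-turbo-mode into a request that is only honoured together with fast math. A minimal caller-side sketch, assuming the ExecuteNetworkParams.hpp header from this directory and leaving every other member at its default:

// Sketch only: exercising the new ValidateParams() rule in isolation.
// The header path and default values of the other members are assumptions.
#include "ExecuteNetworkParams.hpp"
#include <armnn/Exceptions.hpp>
#include <iostream>

void CheckBf16Request()
{
    ExecuteNetworkParams params;
    params.m_EnableBf16TurboMode = true;   // BFloat16 requested...
    params.m_EnableFastMath      = false;  // ...but fast math left off

    try
    {
        params.ValidateParams();           // throws: bf16 now requires --enable-fast-math
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        std::cerr << e.what() << std::endl; // prints the bf16/fast-math guidance
    }
}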
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index 5f19a1498c..e9d77509e4 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -375,14 +375,14 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
m_CxxOptions.add_options("c) Optimization")
("bf16-turbo-mode",
- "If this option is enabled, FP32 layers, "
- "weights and biases will be converted to BFloat16 where the backend supports it",
+ "This option is no longer being used. In order to use bf16 please set enable-fast-math "
+ "to true",
cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
->default_value("false")->implicit_value("true"))
("enable-fast-math",
"Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
- "performance improvements but may result in reduced or different precision.",
+ "performance improvements but may result in reduced or different precision. ",
cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
("number-of-threads",
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index d837fc1fcf..28069242f2 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -452,7 +452,6 @@ public:
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
- options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
options.m_Debug = params.m_PrintIntermediateLayers;
options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
options.m_shapeInferenceMethod = params.m_InferOutputShape ?