Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetwork.cpp')
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index f2763a72b7..5924348763 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -137,7 +137,9 @@ int main(int argc, const char* argv[])
"Add unsupported operators as stand-in layers (where supported by parser)")
("infer-output-shape", po::bool_switch()->default_value(false),
"Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
- "parser)");
+ "parser)")
+ ("enable_fast_math", po::bool_switch()->default_value(false),
+ "Enable fast_math computation of Convolution2D operator where applicable (where supported by backend)");
}
catch (const std::exception& e)
{
@@ -187,6 +189,7 @@ int main(int argc, const char* argv[])
bool parseUnsupported = vm["parse-unsupported"].as<bool>();
bool timelineEnabled = vm["timeline-profiling"].as<bool>();
bool inferOutputShape = vm["infer-output-shape"].as<bool>();
+ bool enableFastMath = vm["enable_fast_math"].as<bool>();
if (enableBf16TurboMode && enableFp16TurboMode)
{
@@ -250,7 +253,7 @@ int main(int argc, const char* argv[])
results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime,
printIntermediate, enableLayerDetails, parseUnsupported,
- inferOutputShape));
+ inferOutputShape, enableFastMath));
}
// Check results
@@ -270,7 +273,7 @@ int main(int argc, const char* argv[])
testCase.values.insert(testCase.values.begin(), executableName);
if (RunCsvTest(testCase, runtime, enableProfiling,
enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
- enableLayerDetails, parseUnsupported, inferOutputShape) != EXIT_SUCCESS)
+ enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath) != EXIT_SUCCESS)
{
return EXIT_FAILURE;
}
@@ -303,7 +306,7 @@ int main(int argc, const char* argv[])
dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput,
outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId,
- enableLayerDetails, parseUnsupported, inferOutputShape);
+ enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath);
}
ARMNN_LOG(info) << "Using tuning params: " << tuningPath << "\n";
options.m_BackendOptions.emplace_back(
@@ -336,6 +339,6 @@ int main(int argc, const char* argv[])
inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape,
- iterations, runtime);
+ enableFastMath, iterations, runtime);
}
}
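
The new flag follows the same Boost.Program_options pattern as the existing switches in this file: a bool_switch that defaults to false and is read back from the variables_map after parsing. The following is a minimal, self-contained sketch of that pattern (a standalone example, not the ExecuteNetwork source itself):

    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, const char* argv[])
    {
        po::options_description desc("Options");
        desc.add_options()
            ("enable_fast_math", po::bool_switch()->default_value(false),
             "Enable fast_math computation where the backend supports it");

        // Parse the command line and read the switch back out.
        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        // bool_switch yields false unless --enable_fast_math was passed.
        bool enableFastMath = vm["enable_fast_math"].as<bool>();
        std::cout << "fast math: " << std::boolalpha << enableFastMath << "\n";
        return 0;
    }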
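Downstream, RunTest and RunCsvTest forward enableFastMath into the inference-model setup, where it is ultimately expressed as a backend model option before optimisation; that plumbing is outside this diff. As a rough, hedged sketch of the general Arm NN mechanism (the helper name MakeOptimizerOptions is illustrative, and the available option names depend on the Arm NN version and backend):

    #include <armnn/BackendOptions.hpp>
    #include <armnn/INetwork.hpp>

    // Hedged sketch: forwarding a fast-math flag to the CpuAcc backend via
    // model options before optimisation. Option and field names may differ
    // between Arm NN releases; this is not the ExecuteNetwork plumbing itself.
    armnn::OptimizerOptions MakeOptimizerOptions(bool enableFastMath)
    {
        armnn::OptimizerOptions optimizerOptions;

        // "FastMathEnabled" lets a supporting backend choose faster
        // Convolution2d implementations at the cost of strict accuracy.
        armnn::BackendOptions cpuAcc("CpuAcc",
        {
            { "FastMathEnabled", enableFastMath }
        });
        optimizerOptions.m_ModelOptions.push_back(cpuAcc);

        return optimizerOptions;
    }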