diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2020-09-15 17:17:08 +0100 |
---|---|---|
committer | Sadik Armagan <sadik.armagan@arm.com> | 2020-09-15 16:15:52 +0000 |
commit | a25886e0966a6b9433cd23595688fadb88a161b2 (patch) | |
tree | f197d2521f7d7120a4d4397a73ca410f83a5d2de /tests/ExecuteNetwork | |
parent | 6f8699ac6e26f230a734168853c64490d70ac3bc (diff) | |
download | armnn-a25886e0966a6b9433cd23595688fadb88a161b2.tar.gz |
IVGCVSW-5317 'Add enable_fast_math Option to ExecuteNetwork'
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I4eb3e27837aea926593d49f9ccea07bab8388d5b
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetwork.cpp | 13 |
1 file changed, 8 insertions, 5 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index f2763a72b7..5924348763 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -137,7 +137,9 @@ int main(int argc, const char* argv[]) "Add unsupported operators as stand-in layers (where supported by parser)") ("infer-output-shape", po::bool_switch()->default_value(false), "Infers output tensor shape from input tensor shape and validate where applicable (where supported by " - "parser)"); + "parser)") + ("enable_fast_math", po::bool_switch()->default_value(false), + "Enable fast_math computation of Convolution2D operator where applicable (where supported by backend)"); } catch (const std::exception& e) { @@ -187,6 +189,7 @@ int main(int argc, const char* argv[]) bool parseUnsupported = vm["parse-unsupported"].as<bool>(); bool timelineEnabled = vm["timeline-profiling"].as<bool>(); bool inferOutputShape = vm["infer-output-shape"].as<bool>(); + bool enableFastMath = vm["enable_fast_math"].as<bool>(); if (enableBf16TurboMode && enableFp16TurboMode) { @@ -250,7 +253,7 @@ int main(int argc, const char* argv[]) results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime), enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, enableLayerDetails, parseUnsupported, - inferOutputShape)); + inferOutputShape, enableFastMath)); } // Check results @@ -270,7 +273,7 @@ int main(int argc, const char* argv[]) testCase.values.insert(testCase.values.begin(), executableName); if (RunCsvTest(testCase, runtime, enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, - enableLayerDetails, parseUnsupported, inferOutputShape) != EXIT_SUCCESS) + enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath) != EXIT_SUCCESS) { return EXIT_FAILURE; } @@ -303,7 +306,7 @@ int main(int argc, const char* argv[]) 
dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId, - enableLayerDetails, parseUnsupported, inferOutputShape); + enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath); } ARMNN_LOG(info) << "Using tuning params: " << tuningPath << "\n"; options.m_BackendOptions.emplace_back( @@ -336,6 +339,6 @@ int main(int argc, const char* argv[]) inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape, - iterations, runtime); + enableFastMath, iterations, runtime); } } |