From 283a8b4aeaebf27c7f14e0c9c4cbfaf06a577cf5 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 22 Sep 2020 14:35:19 +0100
Subject: IVGCVSW-5318 'Create a Neon/CL Workload Unit Test fast_math option enabled'

* Unit test implemented to make sure it returns WINOGRAD
* Updated the enable-fast-math option in ExecuteNetwork to be consistent

Signed-off-by: Sadik Armagan
Change-Id: Id64f114ae47966def69a9eef0770a4251ee56a41
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tests')

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 5924348763..c15e33fc13 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -138,7 +138,7 @@ int main(int argc, const char* argv[])
             ("infer-output-shape", po::bool_switch()->default_value(false),
              "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
              "parser)")
-            ("enable_fast_math", po::bool_switch()->default_value(false),
+            ("enable-fast-math", po::bool_switch()->default_value(false),
              "Enable fast_math computation of Convolution2D operator where applicable (where supported by backend)");
     }
     catch (const std::exception& e)
@@ -189,7 +189,7 @@ int main(int argc, const char* argv[])
     bool parseUnsupported = vm["parse-unsupported"].as<bool>();
     bool timelineEnabled = vm["timeline-profiling"].as<bool>();
     bool inferOutputShape = vm["infer-output-shape"].as<bool>();
-    bool enableFastMath = vm["enable_fast_math"].as<bool>();
+    bool enableFastMath = vm["enable-fast-math"].as<bool>();

     if (enableBf16TurboMode && enableFp16TurboMode)
     {
--
cgit v1.2.1
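
Note (not part of the original patch): the two hunks above must change together because Boost.Program_options looks options up by the exact string used when they were declared. A minimal standalone sketch of the renamed option being declared and read back follows; the `desc`/`vm` variable names and the single-option program are assumptions for illustration only, not the full ExecuteNetwork option set.

    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, const char* argv[])
    {
        // Declare the switch with the hyphenated spelling, consistent with
        // the other ExecuteNetwork options such as infer-output-shape.
        po::options_description desc("Options");
        desc.add_options()
            ("enable-fast-math", po::bool_switch()->default_value(false),
             "Enable fast_math computation of Convolution2D operator where applicable");

        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);

        // The lookup key must match the declared name exactly; a mismatch
        // (e.g. the old enable_fast_math spelling) would throw at runtime.
        bool enableFastMath = vm["enable-fast-math"].as<bool>();
        std::cout << "enableFastMath = " << std::boolalpha << enableFastMath << std::endl;
        return 0;
    }

Invoked as "ExecuteNetwork --enable-fast-math ...", the flag now matches the hyphenated style of the surrounding options; the Neon/CL unit test referenced in the commit message (verifying that the convolution workload selects WINOGRAD when fast_math is enabled) lives outside the 'tests' path and is therefore not shown in this filtered diff.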