From 3e68b97946bfee3c89ec2d4363a22550a10b2e55 Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Tue, 9 Apr 2019 13:10:46 +0100
Subject: IVGCVSW-2928 Fix issue with GPU profiling

Correctly enable GPU profiling when test profiling is enabled.

Remove extra copy of the profiling-enabled flag from
InferenceModel::Params and correctly pass around the copy that is in
InferenceTestOptions.

!referencetests:180329

Change-Id: I0daa1bab2e7068fc479bf417a553183b1d922166
Signed-off-by: Matthew Bentham
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'tests/ExecuteNetwork')

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 9ac66d1cd3..1de22ed5d0 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -236,10 +236,9 @@ int MainImpl(const char* modelPath,
         params.m_OutputBindings.push_back(outputName);
     }
 
-    params.m_EnableProfiling = enableProfiling;
     params.m_SubgraphId = subgraphId;
     params.m_EnableFp16TurboMode = enableFp16TurboMode;
-    InferenceModel model(params, runtime);
+    InferenceModel model(params, enableProfiling, runtime);
 
     for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
    {
-- 
cgit v1.2.1
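
The diff above is limited to tests/ExecuteNetwork, so the matching change to the
InferenceModel constructor is not shown here. As a rough illustration of the pattern
the commit message describes (a simplified sketch, not the actual ArmNN code; the
names Params and Model and the two-argument constructor are placeholders), the
profiling flag stays with the caller's test options and is passed to the model's
constructor rather than being duplicated inside Params:

    // Simplified sketch: the profiling flag lives with the test options and is
    // passed to the model's constructor, instead of being copied into Params.
    #include <iostream>
    #include <string>
    #include <vector>

    struct Params                       // stand-in for InferenceModel::Params (no profiling flag)
    {
        std::vector<std::string> m_OutputBindings;
        unsigned int             m_SubgraphId = 0;
        bool                     m_EnableFp16TurboMode = false;
    };

    class Model                         // stand-in for InferenceModel
    {
    public:
        Model(const Params& params, bool enableProfiling)
            : m_Params(params)
            , m_EnableProfiling(enableProfiling)   // single copy, owned by the caller's options
        {
            std::cout << "profiling enabled: " << std::boolalpha << m_EnableProfiling << "\n";
        }

    private:
        Params m_Params;
        bool   m_EnableProfiling;
    };

    int main()
    {
        Params params;
        params.m_SubgraphId = 0;
        bool enableProfiling = true;    // would come from InferenceTestOptions in the real tests

        Model model(params, enableProfiling);   // mirrors: InferenceModel model(params, enableProfiling, runtime);
        return 0;
    }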