diff options
author | Matthew Bentham <matthew.bentham@arm.com> | 2019-04-09 13:10:46 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-04-15 15:29:37 +0000 |
commit | 3e68b97946bfee3c89ec2d4363a22550a10b2e55 (patch) | |
tree | d7d9be27aff46a18ac9442092d908223f435dcd7 /tests/InferenceModel.hpp | |
parent | 200e38039cf2cef21ae9ba6f86fab6fd524e5077 (diff) | |
download | armnn-3e68b97946bfee3c89ec2d4363a22550a10b2e55.tar.gz |
IVGCVSW-2928 Fix issue with GPU profiling
Correctly enable GPU profiling when test profiling is enabled.
Remove extra copy of the profiling-enabled flag from InferenceModel::Params
and correctly pass around the copy that is in InferenceTestOptions.
!referencetests:180329
Change-Id: I0daa1bab2e7068fc479bf417a553183b1d922166
Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Diffstat (limited to 'tests/InferenceModel.hpp')
-rw-r--r-- | tests/InferenceModel.hpp | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e168923048..cb6daefa9e 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -87,7 +87,6 @@ struct Params
     std::vector&lt;armnn::TensorShape&gt; m_InputShapes;
     std::vector&lt;std::string&gt; m_OutputBindings;
     std::vector&lt;armnn::BackendId&gt; m_ComputeDevices;
-    bool m_EnableProfiling;
     size_t m_SubgraphId;
     bool m_IsModelBinary;
     bool m_VisualizePostOptimizationModel;
@@ -95,7 +94,6 @@ struct Params

     Params()
         : m_ComputeDevices{"CpuRef"}
-        , m_EnableProfiling(false)
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
@@ -428,8 +426,10 @@ public:
             "to FP16 where the backend supports it.");
     }

-    InferenceModel(const Params&amp; params, const std::shared_ptr&lt;armnn::IRuntime&gt;&amp; runtime = nullptr)
-        : m_EnableProfiling(params.m_EnableProfiling)
+    InferenceModel(const Params&amp; params,
+                   bool enableProfiling,
+                   const std::shared_ptr&lt;armnn::IRuntime&gt;&amp; runtime = nullptr)
+        : m_EnableProfiling(enableProfiling)
     {
         if (runtime)
         {