Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetworkParams.hpp')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp  48
1 file changed, 48 insertions, 0 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
new file mode 100644
index 0000000000..5490230ede
--- /dev/null
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Tensor.hpp>
+
+/// Holds all parameters necessary to execute a network
+/// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
+struct ExecuteNetworkParams
+{
+ using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
+
+ std::vector<armnn::BackendId> m_ComputeDevices;
+ bool m_DequantizeOutput;
+ std::string m_DynamicBackendsPath;
+ bool m_EnableBf16TurboMode;
+ bool m_EnableFastMath = false;
+ bool m_EnableFp16TurboMode;
+ bool m_EnableLayerDetails = false;
+ bool m_EnableProfiling;
+ bool m_GenerateTensorData;
+ bool m_InferOutputShape = false;
+ std::vector<std::string> m_InputNames;
+ std::vector<std::string> m_InputTensorDataFilePaths;
+ std::vector<TensorShapePtr> m_InputTensorShapes;
+ std::vector<std::string> m_InputTypes;
+ bool m_IsModelBinary;
+ size_t m_Iterations;
+ std::string m_ModelFormat;
+ std::string m_ModelPath;
+ std::vector<std::string> m_OutputNames;
+ std::vector<std::string> m_OutputTensorFiles;
+ std::vector<std::string> m_OutputTypes;
+ bool m_ParseUnsupported = false;
+ bool m_PrintIntermediate;
+ bool m_QuantizeInput;
+ size_t m_SubgraphId;
+ double m_ThresholdTime;
+ int m_TuningLevel;
+ std::string m_TuningPath;
+
+ // Ensures that the parameters for ExecuteNetwork fit together
+ void ValidateParams();
+};
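
To illustrate how this parameter block is typically filled in by a caller, here is a minimal sketch, assuming the ExecuteNetwork build environment (ExecuteNetworkParams.hpp and the armnn headers on the include path). The model format string, file path, and tensor names are hypothetical placeholders, and the failure behaviour of ValidateParams() (throw vs. assert) is an assumption not shown in this diff.

// Minimal usage sketch for ExecuteNetworkParams (not part of the commit above).
// Values marked "hypothetical" are illustrative only.
#include "ExecuteNetworkParams.hpp"

#include <armnn/BackendId.hpp>
#include <armnn/Tensor.hpp>

#include <memory>

int main()
{
    ExecuteNetworkParams params;

    // Model selection: a binary model executed on the CpuAcc backend.
    params.m_ModelFormat    = "tflite-binary";   // hypothetical format string
    params.m_ModelPath      = "model.tflite";    // hypothetical path
    params.m_IsModelBinary  = true;
    params.m_ComputeDevices = { armnn::BackendId("CpuAcc") };

    // One input/output pair; generate dummy input data instead of reading files.
    params.m_InputNames         = { "input" };   // hypothetical tensor name
    params.m_InputTypes         = { "float" };
    params.m_OutputNames        = { "output" };  // hypothetical tensor name
    params.m_OutputTypes        = { "float" };
    params.m_GenerateTensorData = true;

    // Override the input shape to 1x224x224x3.
    params.m_InputTensorShapes.push_back(
        std::make_unique<armnn::TensorShape>(
            armnn::TensorShape({ 1, 224, 224, 3 })));

    // Fields without in-class initialisers are set explicitly to conservative values.
    params.m_DequantizeOutput    = false;
    params.m_EnableBf16TurboMode = false;
    params.m_EnableFp16TurboMode = false;
    params.m_EnableProfiling     = false;
    params.m_PrintIntermediate   = false;
    params.m_QuantizeInput       = false;
    params.m_Iterations          = 1;
    params.m_SubgraphId          = 0;
    params.m_ThresholdTime       = 0.0;
    params.m_TuningLevel         = 0;

    // Cross-check that the chosen options fit together before running the network.
    params.ValidateParams();
    return 0;
}

In the real tool these fields are populated from the command line by ExecuteNetworkProgramOptions.cpp, as the header comment notes; the sketch simply shows the struct being filled and validated directly.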