aboutsummaryrefslogtreecommitdiff
path: root/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
diff options
context:
space:
mode:
Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetworkParams.hpp')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp | 90
1 file changed, 43 insertions(+), 47 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 5ef2b6ea7c..e60e3b8877 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,8 +16,6 @@
/// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
struct ExecuteNetworkParams
{
- using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
-
enum class TfLiteExecutor
{
ArmNNTfLiteParser,
@@ -25,50 +23,48 @@ struct ExecuteNetworkParams
TfliteInterpreter
};
- bool m_AllowExpandedDims;
- std::string m_CachedNetworkFilePath;
- std::vector<armnn::BackendId> m_ComputeDevices;
- bool m_Concurrent;
- bool m_DequantizeOutput;
- std::string m_DynamicBackendsPath;
- bool m_EnableBf16TurboMode;
- bool m_EnableFastMath = false;
- bool m_EnableFp16TurboMode;
- bool m_EnableLayerDetails = false;
- bool m_EnableProfiling;
- bool m_GenerateTensorData;
- bool m_InferOutputShape = false;
- bool m_EnableDelegate = false;
- std::vector<std::string> m_InputNames;
- std::vector<std::string> m_InputTensorDataFilePaths;
- std::vector<TensorShapePtr> m_InputTensorShapes;
- std::vector<std::string> m_InputTypes;
- bool m_IsModelBinary;
- size_t m_Iterations;
- std::string m_ModelFormat;
- std::string m_ModelPath;
- unsigned int m_NumberOfThreads;
- bool m_OutputDetailsToStdOut;
- bool m_OutputDetailsOnlyToStdOut;
- std::vector<std::string> m_OutputNames;
- std::vector<std::string> m_OutputTensorFiles;
- std::vector<std::string> m_OutputTypes;
- bool m_ParseUnsupported = false;
- bool m_PrintIntermediate;
- bool m_DontPrintOutputs;
- bool m_QuantizeInput;
- bool m_SaveCachedNetwork;
- size_t m_SimultaneousIterations;
- size_t m_SubgraphId;
- double m_ThresholdTime;
- int m_TuningLevel;
- std::string m_TuningPath;
- std::string m_MLGOTuningFilePath;
- TfLiteExecutor m_TfLiteExecutor;
- size_t m_ThreadPoolSize;
- bool m_ImportInputsIfAligned;
- bool m_ReuseBuffers;
-
+ bool m_AllowExpandedDims;
+ std::string m_CachedNetworkFilePath;
+ std::vector<armnn::BackendId> m_ComputeDevices;
+ bool m_Concurrent;
+ bool m_DequantizeOutput;
+ std::string m_DynamicBackendsPath;
+ bool m_EnableBf16TurboMode;
+ bool m_EnableFastMath = false;
+ bool m_EnableFp16TurboMode;
+ bool m_EnableLayerDetails = false;
+ bool m_EnableProfiling;
+ bool m_GenerateTensorData;
+ bool m_InferOutputShape = false;
+ bool m_EnableDelegate = false;
+ bool m_IsModelBinary;
+ std::vector<std::string> m_InputNames;
+ std::vector<std::string> m_InputTensorDataFilePaths;
+ std::vector<armnn::TensorShape> m_InputTensorShapes;
+ size_t m_Iterations;
+ std::string m_ModelPath;
+ unsigned int m_NumberOfThreads;
+ bool m_OutputDetailsToStdOut;
+ bool m_OutputDetailsOnlyToStdOut;
+ std::vector<std::string> m_OutputNames;
+ std::vector<std::string> m_OutputTensorFiles;
+ bool m_ParseUnsupported = false;
+ bool m_PrintIntermediate;
+ bool m_DontPrintOutputs;
+ bool m_QuantizeInput;
+ bool m_SaveCachedNetwork;
+ size_t m_SubgraphId;
+ double m_ThresholdTime;
+ int m_TuningLevel;
+ std::string m_TuningPath;
+ std::string m_MLGOTuningFilePath;
+ TfLiteExecutor m_TfLiteExecutor;
+ size_t m_ThreadPoolSize;
+ bool m_ImportInputsIfAligned;
+ bool m_ReuseBuffers;
+ std::string m_ComparisonFile;
+ std::vector<armnn::BackendId> m_ComparisonComputeDevices;
+ bool m_CompareWithTflite;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();