diff options
author | Nikhil Raj Arm <nikhil.raj@arm.com> | 2022-07-05 09:29:18 +0000 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-07-08 15:21:03 +0100 |
commit | f4ccb1f6339a1e9ed573f188e7f14353167b5749 (patch) | |
tree | bb53a449cd42ed919022bd52b9e369a28d5a14d4 /tests/ExecuteNetwork/ExecuteNetworkParams.hpp | |
parent | fd33a698ee3c588aa4064b70b7781ab25ff76f66 (diff) | |
download | armnn-f4ccb1f6339a1e9ed573f188e7f14353167b5749.tar.gz |
Revert "IVGCVSW-6650 Refactor ExecuteNetwork"
This reverts commit 615e06f54a4c4139e81e289991ba4084aa2f69d3.
Reason for revert: <Breaking nightlies and tests>
Change-Id: I06a4a0119463188a653bb749033f78514645bd0c
Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetworkParams.hpp')
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetworkParams.hpp | 89 |
1 file changed, 46 insertions(+), 43 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp index 104c1c50c2..5ef2b6ea7c 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp +++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -16,6 +16,8 @@ /// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter struct ExecuteNetworkParams { + using TensorShapePtr = std::unique_ptr<armnn::TensorShape>; + enum class TfLiteExecutor { ArmNNTfLiteParser, @@ -23,49 +25,50 @@ struct ExecuteNetworkParams TfliteInterpreter }; - bool m_AllowExpandedDims; - std::string m_CachedNetworkFilePath; - std::vector<armnn::BackendId> m_ComputeDevices; - bool m_Concurrent; - bool m_DequantizeOutput; - std::string m_DynamicBackendsPath; - bool m_EnableBf16TurboMode; - bool m_EnableFastMath = false; - bool m_EnableFp16TurboMode; - bool m_EnableLayerDetails = false; - bool m_EnableProfiling; - bool m_GenerateTensorData; - bool m_InferOutputShape = false; - bool m_EnableDelegate = false; - bool m_IsModelBinary; - std::vector<std::string> m_InputNames; - std::vector<std::string> m_InputTensorDataFilePaths; - std::vector<armnn::TensorShape> m_InputTensorShapes; - size_t m_Iterations; - std::string m_ModelPath; - unsigned int m_NumberOfThreads; - bool m_OutputDetailsToStdOut; - bool m_OutputDetailsOnlyToStdOut; - std::vector<std::string> m_OutputNames; - std::vector<std::string> m_OutputTensorFiles; - bool m_ParseUnsupported = false; - bool m_PrintIntermediate; - bool m_DontPrintOutputs; - bool m_QuantizeInput; - bool m_SaveCachedNetwork; - size_t m_SubgraphId; - double m_ThresholdTime; - int m_TuningLevel; - std::string m_TuningPath; - std::string m_MLGOTuningFilePath; - TfLiteExecutor m_TfLiteExecutor; - size_t m_ThreadPoolSize; - bool m_ImportInputsIfAligned; - bool 
m_ReuseBuffers; + bool m_AllowExpandedDims; + std::string m_CachedNetworkFilePath; + std::vector<armnn::BackendId> m_ComputeDevices; + bool m_Concurrent; + bool m_DequantizeOutput; + std::string m_DynamicBackendsPath; + bool m_EnableBf16TurboMode; + bool m_EnableFastMath = false; + bool m_EnableFp16TurboMode; + bool m_EnableLayerDetails = false; + bool m_EnableProfiling; + bool m_GenerateTensorData; + bool m_InferOutputShape = false; + bool m_EnableDelegate = false; + std::vector<std::string> m_InputNames; + std::vector<std::string> m_InputTensorDataFilePaths; + std::vector<TensorShapePtr> m_InputTensorShapes; + std::vector<std::string> m_InputTypes; + bool m_IsModelBinary; + size_t m_Iterations; + std::string m_ModelFormat; + std::string m_ModelPath; + unsigned int m_NumberOfThreads; + bool m_OutputDetailsToStdOut; + bool m_OutputDetailsOnlyToStdOut; + std::vector<std::string> m_OutputNames; + std::vector<std::string> m_OutputTensorFiles; + std::vector<std::string> m_OutputTypes; + bool m_ParseUnsupported = false; + bool m_PrintIntermediate; + bool m_DontPrintOutputs; + bool m_QuantizeInput; + bool m_SaveCachedNetwork; + size_t m_SimultaneousIterations; + size_t m_SubgraphId; + double m_ThresholdTime; + int m_TuningLevel; + std::string m_TuningPath; + std::string m_MLGOTuningFilePath; + TfLiteExecutor m_TfLiteExecutor; + size_t m_ThreadPoolSize; + bool m_ImportInputsIfAligned; + bool m_ReuseBuffers; - std::string m_ComparisonFile; - std::vector<armnn::BackendId> m_ComparisonComputeDevices; - bool m_CompareWithTflite; // Ensures that the parameters for ExecuteNetwork fit together void ValidateParams(); |