about summary refs log tree commit diff
path: root/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
diff options
context:
space:
mode:
author    Teresa Charlin <teresa.charlinreyes@arm.com>  2022-07-07 14:24:59 +0100
committer Nikhil Raj <nikhil.raj@arm.com>  2022-07-28 15:08:22 +0100
commit    83b429107a4bb1fe84e756c29d8ad3771d4beeee (patch)
tree      c383d2692365a8166b949f90267e152f0624704f /tests/ExecuteNetwork/ExecuteNetworkParams.hpp
parent    46f298657c14c1b0a4b0690ecce49f64dc0a7010 (diff)
download  armnn-83b429107a4bb1fe84e756c29d8ad3771d4beeee.tar.gz
Revert "Revert "IVGCVSW-6650 Refactor ExecuteNetwork""
This reverts commit 1a7f033768acb27da11503bd29abb468d2e77f9e.

List of fixes to be able to add this code again:
* "emplacing_back" the vector inputTensors into the vector m_InputTensorsVec outside the for loop
* GetIOInfo() uses IOptimizedNetwork instead of INetwork, where the inferred shapes are not saved
* Add missing data type Signed32 to SetupInputsAndOutputs()
* PrintOutputTensors() prints the actual output without dequantizing
* Add profilingDetailsMethod as input in networkProperties in ArmNNExecutor constructor
* Fix typos

Change-Id: I91de166f87228282db3efa27431fe91458834442
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ic6634d48892d11e5f146cdf285e1e333e93e9937
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetworkParams.hpp')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp  90
1 file changed, 43 insertions, 47 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 5ef2b6ea7c..e60e3b8877 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,8 +16,6 @@
/// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
struct ExecuteNetworkParams
{
- using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
-
enum class TfLiteExecutor
{
ArmNNTfLiteParser,
@@ -25,50 +23,48 @@ struct ExecuteNetworkParams
TfliteInterpreter
};
- bool m_AllowExpandedDims;
- std::string m_CachedNetworkFilePath;
- std::vector<armnn::BackendId> m_ComputeDevices;
- bool m_Concurrent;
- bool m_DequantizeOutput;
- std::string m_DynamicBackendsPath;
- bool m_EnableBf16TurboMode;
- bool m_EnableFastMath = false;
- bool m_EnableFp16TurboMode;
- bool m_EnableLayerDetails = false;
- bool m_EnableProfiling;
- bool m_GenerateTensorData;
- bool m_InferOutputShape = false;
- bool m_EnableDelegate = false;
- std::vector<std::string> m_InputNames;
- std::vector<std::string> m_InputTensorDataFilePaths;
- std::vector<TensorShapePtr> m_InputTensorShapes;
- std::vector<std::string> m_InputTypes;
- bool m_IsModelBinary;
- size_t m_Iterations;
- std::string m_ModelFormat;
- std::string m_ModelPath;
- unsigned int m_NumberOfThreads;
- bool m_OutputDetailsToStdOut;
- bool m_OutputDetailsOnlyToStdOut;
- std::vector<std::string> m_OutputNames;
- std::vector<std::string> m_OutputTensorFiles;
- std::vector<std::string> m_OutputTypes;
- bool m_ParseUnsupported = false;
- bool m_PrintIntermediate;
- bool m_DontPrintOutputs;
- bool m_QuantizeInput;
- bool m_SaveCachedNetwork;
- size_t m_SimultaneousIterations;
- size_t m_SubgraphId;
- double m_ThresholdTime;
- int m_TuningLevel;
- std::string m_TuningPath;
- std::string m_MLGOTuningFilePath;
- TfLiteExecutor m_TfLiteExecutor;
- size_t m_ThreadPoolSize;
- bool m_ImportInputsIfAligned;
- bool m_ReuseBuffers;
-
+ bool m_AllowExpandedDims;
+ std::string m_CachedNetworkFilePath;
+ std::vector<armnn::BackendId> m_ComputeDevices;
+ bool m_Concurrent;
+ bool m_DequantizeOutput;
+ std::string m_DynamicBackendsPath;
+ bool m_EnableBf16TurboMode;
+ bool m_EnableFastMath = false;
+ bool m_EnableFp16TurboMode;
+ bool m_EnableLayerDetails = false;
+ bool m_EnableProfiling;
+ bool m_GenerateTensorData;
+ bool m_InferOutputShape = false;
+ bool m_EnableDelegate = false;
+ bool m_IsModelBinary;
+ std::vector<std::string> m_InputNames;
+ std::vector<std::string> m_InputTensorDataFilePaths;
+ std::vector<armnn::TensorShape> m_InputTensorShapes;
+ size_t m_Iterations;
+ std::string m_ModelPath;
+ unsigned int m_NumberOfThreads;
+ bool m_OutputDetailsToStdOut;
+ bool m_OutputDetailsOnlyToStdOut;
+ std::vector<std::string> m_OutputNames;
+ std::vector<std::string> m_OutputTensorFiles;
+ bool m_ParseUnsupported = false;
+ bool m_PrintIntermediate;
+ bool m_DontPrintOutputs;
+ bool m_QuantizeInput;
+ bool m_SaveCachedNetwork;
+ size_t m_SubgraphId;
+ double m_ThresholdTime;
+ int m_TuningLevel;
+ std::string m_TuningPath;
+ std::string m_MLGOTuningFilePath;
+ TfLiteExecutor m_TfLiteExecutor;
+ size_t m_ThreadPoolSize;
+ bool m_ImportInputsIfAligned;
+ bool m_ReuseBuffers;
+ std::string m_ComparisonFile;
+ std::vector<armnn::BackendId> m_ComparisonComputeDevices;
+ bool m_CompareWithTflite;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();