From 615e06f54a4c4139e81e289991ba4084aa2f69d3 Mon Sep 17 00:00:00 2001
From: Finn Williams <finn.williams@arm.com>
Date: Mon, 20 Jun 2022 13:48:20 +0100
Subject: IVGCVSW-6650 Refactor ExecuteNetwork

 * Remove InferenceModel
 * Add automatic IO type, shape and name configuration
 * Deprecate various redundant options
 * Add internal output comparison

Signed-off-by: Finn Williams <finn.williams@arm.com>
Change-Id: I2eca248bc91e1655a99ed94990efb8059f541fa9
---
 tests/ExecuteNetwork/ExecuteNetworkParams.hpp | 89 +++++++++++++--------------
 1 file changed, 43 insertions(+), 46 deletions(-)

(limited to 'tests/ExecuteNetwork/ExecuteNetworkParams.hpp')

diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 5ef2b6ea7c..104c1c50c2 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -16,8 +16,6 @@
 /// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
 struct ExecuteNetworkParams
 {
-    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
-
     enum class TfLiteExecutor
     {
         ArmNNTfLiteParser,
@@ -25,50 +23,49 @@ struct ExecuteNetworkParams
         TfliteInterpreter
     };
 
-    bool m_AllowExpandedDims;
-    std::string m_CachedNetworkFilePath;
-    std::vector<std::string> m_ComputeDevices;
-    bool m_Concurrent;
-    bool m_DequantizeOutput;
-    std::string m_DynamicBackendsPath;
-    bool m_EnableBf16TurboMode;
-    bool m_EnableFastMath = false;
-    bool m_EnableFp16TurboMode;
-    bool m_EnableLayerDetails = false;
-    bool m_EnableProfiling;
-    bool m_GenerateTensorData;
-    bool m_InferOutputShape = false;
-    bool m_EnableDelegate = false;
-    std::vector<std::string> m_InputNames;
-    std::vector<std::string> m_InputTensorDataFilePaths;
-    std::vector<TensorShapePtr> m_InputTensorShapes;
-    std::vector<std::string> m_InputTypes;
-    bool m_IsModelBinary;
-    size_t m_Iterations;
-    std::string m_ModelFormat;
-    std::string m_ModelPath;
-    unsigned int m_NumberOfThreads;
-    bool m_OutputDetailsToStdOut;
-    bool m_OutputDetailsOnlyToStdOut;
-    std::vector<std::string> m_OutputNames;
-    std::vector<std::string> m_OutputTensorFiles;
-    std::vector<std::string> m_OutputTypes;
-    bool m_ParseUnsupported = false;
-    bool m_PrintIntermediate;
-    bool m_DontPrintOutputs;
-    bool m_QuantizeInput;
-    bool m_SaveCachedNetwork;
-    size_t m_SimultaneousIterations;
-    size_t m_SubgraphId;
-    double m_ThresholdTime;
-    int m_TuningLevel;
-    std::string m_TuningPath;
-    std::string m_MLGOTuningFilePath;
-    TfLiteExecutor m_TfLiteExecutor;
-    size_t m_ThreadPoolSize;
-    bool m_ImportInputsIfAligned;
-    bool m_ReuseBuffers;
+    bool                              m_AllowExpandedDims;
+    std::string                       m_CachedNetworkFilePath;
+    std::vector<std::string>          m_ComputeDevices;
+    bool                              m_Concurrent;
+    bool                              m_DequantizeOutput;
+    std::string                       m_DynamicBackendsPath;
+    bool                              m_EnableBf16TurboMode;
+    bool                              m_EnableFastMath = false;
+    bool                              m_EnableFp16TurboMode;
+    bool                              m_EnableLayerDetails = false;
+    bool                              m_EnableProfiling;
+    bool                              m_GenerateTensorData;
+    bool                              m_InferOutputShape = false;
+    bool                              m_EnableDelegate = false;
+    bool                              m_IsModelBinary;
+    std::vector<std::string>          m_InputNames;
+    std::vector<std::string>          m_InputTensorDataFilePaths;
+    std::vector<armnn::TensorShape>   m_InputTensorShapes;
+    size_t                            m_Iterations;
+    std::string                       m_ModelPath;
+    unsigned int                      m_NumberOfThreads;
+    bool                              m_OutputDetailsToStdOut;
+    bool                              m_OutputDetailsOnlyToStdOut;
+    std::vector<std::string>          m_OutputNames;
+    std::vector<std::string>          m_OutputTensorFiles;
+    bool                              m_ParseUnsupported = false;
+    bool                              m_PrintIntermediate;
+    bool                              m_DontPrintOutputs;
+    bool                              m_QuantizeInput;
+    bool                              m_SaveCachedNetwork;
+    size_t                            m_SubgraphId;
+    double                            m_ThresholdTime;
+    int                               m_TuningLevel;
+    std::string                       m_TuningPath;
+    std::string                       m_MLGOTuningFilePath;
+    TfLiteExecutor                    m_TfLiteExecutor;
+    size_t                            m_ThreadPoolSize;
+    bool                              m_ImportInputsIfAligned;
+    bool                              m_ReuseBuffers;
+    std::string                       m_ComparisonFile;
+    std::vector<std::string>          m_ComparisonComputeDevices;
+    bool                              m_CompareWithTflite;
 
     // Ensures that the parameters for ExecuteNetwork fit together
     void ValidateParams();
 
-- 
cgit v1.2.1