author    Sadik Armagan <sadik.armagan@arm.com>  2020-07-14 10:02:22 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2020-07-29 13:05:35 +0000
commit    a9c2ce123a6a5a68728d040a0323c482bbe46903 (patch)
tree      18f0556645c4228ae3551c9dd7a62a3b7154e93b
parent    57512af8c7a628097c644fdae276cf0f6da336ee (diff)
IVGCVSW-4980 Introduce InferAndValidate option to ExecuteNetwork for parsers
* Introduced infer-output-shape option to TfLiteParser in ExecuteNetwork app

!armnn:3591

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I30bd5e51ac2b6759169e22a44586fd97986f2402
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                 18
-rw-r--r--  tests/InferenceModel.hpp                                 5
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp   17
3 files changed, 28 insertions(+), 12 deletions(-)
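
The whole change threads a single boolean from the command line down to the TfLite parser. The flag itself is declared with Boost.Program_options' bool_switch, as the first hunk below shows. Here is a minimal, self-contained sketch of that pattern; the surrounding main() is illustrative only, not ArmNN code:

#include <boost/program_options.hpp>
#include <iostream>

namespace po = boost::program_options;

int main(int argc, const char* argv[])
{
    po::options_description desc("Options");
    desc.add_options()
        ("infer-output-shape", po::bool_switch()->default_value(false),
         "Infers output tensor shape from input tensor shape and validates "
         "where applicable (where supported by parser)");

    po::variables_map vm;
    po::store(po::parse_command_line(argc, argv, desc), vm);
    po::notify(vm);

    // bool_switch() yields false unless the flag appears on the command line.
    bool inferOutputShape = vm["infer-output-shape"].as<bool>();
    std::cout << std::boolalpha << inferOutputShape << "\n";
    return 0;
}
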
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 9b79c8c6b4..f2763a72b7 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -134,7 +134,10 @@ int main(int argc, const char* argv[])
"Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
"Requires tuning-path to be set, default is set to 0 (No tuning run)")
("parse-unsupported", po::bool_switch()->default_value(false),
- "Add unsupported operators as stand-in layers (where supported by parser)");
+ "Add unsupported operators as stand-in layers (where supported by parser)")
+ ("infer-output-shape", po::bool_switch()->default_value(false),
+ "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
+ "parser)");
}
catch (const std::exception& e)
{
@@ -183,6 +186,7 @@ int main(int argc, const char* argv[])
bool fileOnlyExternalProfiling = vm["file-only-external-profiling"].as<bool>();
bool parseUnsupported = vm["parse-unsupported"].as<bool>();
bool timelineEnabled = vm["timeline-profiling"].as<bool>();
+ bool inferOutputShape = vm["infer-output-shape"].as<bool>();
if (enableBf16TurboMode && enableFp16TurboMode)
{
@@ -245,7 +249,8 @@ int main(int argc, const char* argv[])
testCase.values.insert(testCase.values.begin(), executableName);
results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime,
- printIntermediate, enableLayerDetails, parseUnsupported));
+ printIntermediate, enableLayerDetails, parseUnsupported,
+ inferOutputShape));
}
// Check results
@@ -265,7 +270,7 @@ int main(int argc, const char* argv[])
testCase.values.insert(testCase.values.begin(), executableName);
if (RunCsvTest(testCase, runtime, enableProfiling,
enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
- enableLayerDetails, parseUnsupported) != EXIT_SUCCESS)
+ enableLayerDetails, parseUnsupported, inferOutputShape) != EXIT_SUCCESS)
{
return EXIT_FAILURE;
}
@@ -298,7 +303,7 @@ int main(int argc, const char* argv[])
dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput,
outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId,
- enableLayerDetails, parseUnsupported);
+ enableLayerDetails, parseUnsupported, inferOutputShape);
}
ARMNN_LOG(info) << "Using tuning params: " << tuningPath << "\n";
options.m_BackendOptions.emplace_back(
@@ -330,6 +335,7 @@ int main(int argc, const char* argv[])
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath,
inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
- thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, iterations, runtime);
+ thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape,
+ iterations, runtime);
}
}
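
In the concurrent CSV path above, each test case is dispatched with std::async and the new flag simply rides along as one more forwarded argument. A hedged sketch of that fan-out-and-collect pattern follows; RunOneCase is a hypothetical stand-in for RunCsvTest:

#include <cstdlib>
#include <future>
#include <vector>

// Hypothetical stand-in for RunCsvTest: runs one test case, returns an exit code.
int RunOneCase(int caseIndex, bool inferOutputShape)
{
    (void)caseIndex;
    (void)inferOutputShape;
    return EXIT_SUCCESS;
}

int main()
{
    const bool inferOutputShape = true;
    std::vector<std::future<int>> results;

    // Launch every case on its own thread; std::async copies the flag into
    // the task, exactly as the extra argument is forwarded in the hunk above.
    for (int i = 0; i < 4; ++i)
    {
        results.push_back(std::async(std::launch::async, RunOneCase, i, inferOutputShape));
    }

    // Check results: any failing case fails the whole run.
    for (auto& result : results)
    {
        if (result.get() != EXIT_SUCCESS)
        {
            return EXIT_FAILURE;
        }
    }
    return EXIT_SUCCESS;
}
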
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 5588d55aaf..68ee8ae81a 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -95,6 +95,7 @@ struct Params
bool m_EnableBf16TurboMode;
bool m_PrintIntermediateLayers;
bool m_ParseUnsupported;
+ bool m_InferOutputShape;
Params()
: m_ComputeDevices{}
@@ -105,6 +106,7 @@ struct Params
, m_EnableBf16TurboMode(false)
, m_PrintIntermediateLayers(false)
, m_ParseUnsupported(false)
+ , m_InferOutputShape(false)
{}
};
@@ -241,6 +243,7 @@ public:
// Create a network from a file on disk
IParser::TfLiteParserOptions options;
options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
+ options.m_InferAndValidate = params.m_InferOutputShape;
auto parser(IParser::Create(options));
armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
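
The hunk above is where the flag finally reaches the parser: Params::m_InferOutputShape is copied into TfLiteParserOptions::m_InferAndValidate before the parser is created. A minimal sketch of setting those options on the TfLite parser directly, assuming the armnnTfLiteParser headers and library are available; the model path is hypothetical:

#include <cstdlib>
#include <armnn/INetwork.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = false; // --parse-unsupported
    options.m_InferAndValidate           = true;  // --infer-output-shape

    auto parser = armnnTfLiteParser::ITfLiteParser::Create(options);

    // With m_InferAndValidate set, output tensor shapes are inferred from
    // input tensor shapes and validated during parsing, where supported.
    armnn::INetworkPtr network =
        parser->CreateNetworkFromBinaryFile("model.tflite"); // hypothetical path
    return network ? EXIT_SUCCESS : EXIT_FAILURE;
}
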
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 31f37916b8..69941d5678 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/ArmNN.hpp>
@@ -375,6 +375,7 @@ struct ExecuteNetworkParams
bool m_EnableLayerDetails = false;
bool m_GenerateTensorData;
bool m_ParseUnsupported = false;
+ bool m_InferOutputShape = false;
};
template<typename TParser, typename TDataType>
@@ -397,6 +398,7 @@ int MainImpl(const ExecuteNetworkParams& params,
inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
+ inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
for(const std::string& inputName: params.m_InputNames)
{
@@ -550,6 +552,7 @@ int RunTest(const std::string& format,
const size_t subgraphId,
bool enableLayerDetails = false,
bool parseUnsupported = false,
+ bool inferOutputShape = false,
const size_t iterations = 1,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
@@ -678,6 +681,7 @@ int RunTest(const std::string& format,
params.m_EnableLayerDetails = enableLayerDetails;
params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
params.m_ParseUnsupported = parseUnsupported;
+ params.m_InferOutputShape = inferOutputShape;
// Warn if ExecuteNetwork will generate dummy input data
if (params.m_GenerateTensorData)
@@ -749,7 +753,7 @@ int RunTest(const std::string& format,
int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
const bool enableProfiling, const bool enableFp16TurboMode, const bool enableBf16TurboMode,
const double& thresholdTime, const bool printIntermediate, bool enableLayerDetails = false,
- bool parseUnuspported = false)
+ bool parseUnsupported = false, bool inferOutputShape = false)
{
IgnoreUnused(runtime);
std::string modelFormat;
@@ -869,7 +873,8 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
- thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported);
+ thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported,
+ inferOutputShape);
}
#if defined(ARMCOMPUTECL_ENABLED)
@@ -895,7 +900,8 @@ int RunCLTuning(const std::string& tuningPath,
bool printIntermediate,
const size_t subgraphId,
bool enableLayerDetails = false,
- bool parseUnsupported = false)
+ bool parseUnsupported = false,
+ bool inferOutputShape = false)
{
armnn::IRuntime::CreationOptions options;
options.m_BackendOptions.emplace_back(
@@ -917,7 +923,8 @@ int RunCLTuning(const std::string& tuningPath,
int state = RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
- thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, 1, runtime);
+ thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported,
+ inferOutputShape, 1, runtime);
ARMNN_LOG(info) << "Tuning time: " << std::setprecision(2)
<< std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";