From 626bd90378670eb5fd76f94526395430b752ad9e Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Tue, 21 Jun 2022 13:16:23 +0000
Subject: Revert "Revert "IVGCVSW-6873 Import inputs but don't export outputs fails.""

This reverts commit a0f8b15d4ddb5075f380003ff31b271d389d3b66.

Reason for revert:

Change-Id: Ibc4a77fa008643849da7330391942e4c87b941e2
---
 .../backendsCommon/test/CompatibilityTests.cpp     |  2 +-
 .../backendsCommon/test/EndToEndTestImpl.hpp       | 32 +++++++++++++++++-----
 .../backendsCommon/test/OptimizedNetworkTests.cpp  |  2 +-
 3 files changed, 27 insertions(+), 9 deletions(-)

(limited to 'src/backends/backendsCommon')

diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index c69a4b5f91..9c85ffcfc3 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@ TEST_CASE("Neon_Cl_DirectCompatibility_Test")
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 77901df444..cc5aa23ca3 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,7 +204,9 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -269,7 +271,10 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -340,7 +345,10 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -424,7 +432,9 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -514,7 +524,9 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -601,7 +613,10 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -694,7 +709,10 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<BackendId> backends)
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optimizedOptions;
+    optimizedOptions.m_ImportEnabled = true;
+    optimizedOptions.m_ExportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
 
     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index bcea0610db..cd865def71 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@ TEST_CASE("OptimizeNetworkCopy")
 
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
--
cgit v1.2.1
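
For context, the pattern this revert restores in the tests is the explicit, per-direction opt-in through OptimizerOptions: m_ImportEnabled and m_ExportEnabled are set independently and passed to Optimize(), so importing inputs no longer implies exporting outputs. The code below is a minimal sketch of a caller using that pattern; it follows the Optimize() calls in the hunks above, but the ReLu network, the CpuRef backend, and the Malloc memory sources at load time are illustrative assumptions, not part of this change.

// Minimal sketch (not part of the patch): enable import of caller-owned input
// buffers and export of caller-owned output buffers, mirroring the Optimize()
// calls in the tests above. Topology and backend choice are illustrative only.
#include <armnn/ArmNN.hpp>

#include <string>
#include <utility>
#include <vector>

int main()
{
    using namespace armnn;

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);

    // Trivial graph: Input -> Activation(ReLu) -> Output.
    INetworkPtr net = INetwork::Create();
    IConnectableLayer* input = net->AddInputLayer(0);
    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* activation = net->AddActivationLayer(descriptor);
    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
    activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

    // Import and export are opted into independently; either flag can be set
    // on its own, which is what the ImportOnly/ExportOnly tests above exercise.
    OptimizerOptions optimizedOptions;
    optimizedOptions.m_ImportEnabled = true;
    optimizedOptions.m_ExportEnabled = true;

    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);

    // Loading mirrors the tests: the memory sources announced here should match
    // the import/export choices made at optimization time (assumed Malloc here).
    std::string errorMessage;
    INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet), errorMessage, networkProperties);

    return 0;
}

At inference time such a caller would then pass pointers to its own appropriately aligned buffers via ImportedInputId/ImportedOutputId or the input/output tensors, which is exactly the behaviour the aligned/non-aligned pointer tests in EndToEndTestImpl.hpp verify.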