From a0f8b15d4ddb5075f380003ff31b271d389d3b66 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Tue, 21 Jun 2022 11:31:47 +0000
Subject: Revert "IVGCVSW-6873 Import inputs but don't export outputs fails."

This reverts commit 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721.

Reason for revert: Caused failures in tests located in internal repo.

Change-Id: If35cb0ede349b270e4e7827324382e09455d8cfa
---
 .../backendsCommon/test/CompatibilityTests.cpp     |  2 +-
 .../backendsCommon/test/EndToEndTestImpl.hpp       | 32 +++++-----------------
 .../backendsCommon/test/OptimizedNetworkTests.cpp  |  2 +-
 src/backends/cl/test/ClCustomAllocatorTests.cpp    |  1 -
 src/backends/cl/test/ClFallbackTests.cpp           |  2 --
 src/backends/cl/test/ClImportTensorHandleTests.cpp |  7 -----
 src/backends/cl/test/ClOptimizedNetworkTests.cpp   |  2 +-
 src/backends/neon/test/NeonFallbackTests.cpp       |  6 ----
 .../neon/test/NeonOptimizedNetworkTests.cpp        |  2 +-
 9 files changed, 11 insertions(+), 45 deletions(-)

(limited to 'src/backends')

diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 9c85ffcfc3..c69a4b5f91 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@ TEST_CASE("Neon_Cl_DirectCompatibility_Test")
     graph.TopologicalSort();

     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);

     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23ca3..77901df444 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,9 +204,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);

     // Loads it into the runtime.
@@ -271,10 +269,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);

     // Loads it into the runtime.
@@ -345,10 +340,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);

     // Loads it into the runtime.
@@ -432,9 +424,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -524,9 +514,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -613,10 +601,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));

-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -709,10 +694,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<BackendId> backends)
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));

     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865def71..bcea0610db 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@ TEST_CASE("OptimizeNetworkCopy")

     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
     std::vector<std::string> errorMessages;

     // optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..139e688dc2 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,7 +120,6 @@ TEST_CASE("ClCustomAllocatorTest")
     // Optimise ArmNN network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);

diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 51a983a681..6ac94337ba 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,7 +50,6 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -331,7 +330,6 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9a075d2b7d..20537b3c81 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,7 +142,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -339,7 +338,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -472,7 +470,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -616,7 +613,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -751,7 +747,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -901,7 +896,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1123,7 +1117,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..cf17eae208 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")

     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());

-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 8e0e0ab99b..d2de843fd9 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,7 +60,6 @@ TEST_CASE("FallbackImportToCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -204,7 +203,6 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -340,7 +338,6 @@ TEST_CASE("FallbackImportFromCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -485,7 +482,6 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -750,7 +746,6 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1042,7 +1037,6 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);

     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bfd07..9b448b270d 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@ TEST_CASE("FastMathEnabledTestOnCpuAcc")

     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());

-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
--
cgit v1.2.1
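
For context, the call pattern that remains available in the affected tests after this revert is sketched below. This is a minimal sketch, not code from the patch: it assumes the ArmNN public API of this era (armnn/ArmNN.hpp, OptimizerOptions, Optimize, IRuntime::GetDeviceSpec), and the helper name OptimizeWithImport is a hypothetical placeholder. The point it illustrates is that import stays configurable through OptimizerOptions::m_ImportEnabled, while the separate m_ExportEnabled member (and the extra exportEnabled argument to SelectTensorHandleStrategy) added by the reverted change is removed again.

    // Sketch only: assumes ArmNN public headers of this release; the helper
    // name is hypothetical and not part of the patch or of ArmNN itself.
    #include <armnn/ArmNN.hpp>

    #include <vector>

    armnn::IOptimizedNetworkPtr OptimizeWithImport(const armnn::INetwork& net,
                                                   const std::vector<armnn::BackendId>& backends,
                                                   armnn::IRuntime& runtime)
    {
        armnn::OptimizerOptions optOptions;
        optOptions.m_ImportEnabled = true;   // still available after the revert
        // Pre-revert test code additionally set optOptions.m_ExportEnabled = true;
        // that member no longer exists once this revert is applied.
        return armnn::Optimize(net, backends, runtime.GetDeviceSpec(), optOptions);
    }

Most of the reverted hunks in EndToEndTestImpl.hpp simply drop the options argument altogether, so those tests fall back to Optimize's default-constructed OptimizerOptions, in which import is disabled.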