From a0f8b15d4ddb5075f380003ff31b271d389d3b66 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Tue, 21 Jun 2022 11:31:47 +0000
Subject: Revert "IVGCVSW-6873 Import inputs but don't export outputs fails."

This reverts commit 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721.

Reason for revert: Caused failures in tests located in internal repo.

Change-Id: If35cb0ede349b270e4e7827324382e09455d8cfa
---
 src/armnn/LoadedNetwork.cpp                        |  96 +----------
 src/armnn/Network.cpp                              |  21 +--
 src/armnn/Network.hpp                              |   1 -
 src/armnn/Runtime.hpp                              |   4 +-
 src/armnn/test/RuntimeTests.cpp                    | 187 +--------------------
 src/armnn/test/TensorHandleStrategyTest.cpp        |   2 +-
 .../backendsCommon/test/CompatibilityTests.cpp     |   2 +-
 .../backendsCommon/test/EndToEndTestImpl.hpp       |  32 +---
 .../backendsCommon/test/OptimizedNetworkTests.cpp  |   2 +-
 src/backends/cl/test/ClCustomAllocatorTests.cpp    |   1 -
 src/backends/cl/test/ClFallbackTests.cpp           |   2 -
 src/backends/cl/test/ClImportTensorHandleTests.cpp |   7 -
 src/backends/cl/test/ClOptimizedNetworkTests.cpp   |   2 +-
 src/backends/neon/test/NeonFallbackTests.cpp       |   6 -
 .../neon/test/NeonOptimizedNetworkTests.cpp        |   2 +-
 15 files changed, 31 insertions(+), 336 deletions(-)

diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index a27add921e..ec79d5da3e 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,87 +84,6 @@ void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils
 
 } // anonymous
 
-/**
- * This function performs a sanity check to ensure that the combination of input and output memory source matches the
- * values for importEnabled and exportEnabled that were specified during optimization. During optimization the tensor
- * handle factories are chosen based on whether import and export are enabled. If the user then specifies something
- * incompatible here it can lead to problems.
- *
- * @param optimizedOptions
- * @param networkProperties
- */
-void ValidateSourcesMatchOptimizedNetwork(std::vector<BackendOptions> optimizedOptions,
-                                          const INetworkProperties& networkProperties)
-{
-    // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
-    // added as backend options.
-    const vector<BackendOptions>::iterator& backendItr =
-        find_if(optimizedOptions.begin(), optimizedOptions.end(), [](const BackendOptions& backend) {
-            if (backend.GetBackendId().Get() == "Global")
-            {
-                return true;
-            }
-            else
-            {
-                return false;
-            }
-        });
-    bool importEnabled = false;
-    bool exportEnabled = false;
-    if (backendItr != optimizedOptions.end())
-    {
-        // Find the importEnabled and exportEnabled values.
-        for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
-        {
-            const BackendOptions::BackendOption& option = backendItr->GetOption(i);
-            if (option.GetName() == "ImportEnabled")
-            {
-                importEnabled = option.GetValue().AsBool();
-            }
-            if (option.GetName() == "ExportEnabled")
-            {
-                exportEnabled = option.GetValue().AsBool();
-            }
-        }
-    }
-
-    // Now that we have values for import and export compare them to the MemorySource variables.
-    // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
-    if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
-        (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
-    {
-        auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
-        if (!importEnabled)
-        {
-            message.append(" requires that memory import be enabled. However, "
-                           "it was disabled when this network was optimized.");
-        }
-        else
-        {
-            message.append(" requires that memory import be disabled. However, "
-                           "it was enabled when this network was optimized.");
-        }
-        throw InvalidArgumentException(message);
-    }
-
-    if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
-        (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
-    {
-        auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
-        if (!exportEnabled)
-        {
-            message.append(" requires that memory export be enabled. However, "
-                           "it was disabled when this network was optimized.");
-        }
-        else
-        {
-            message.append(" requires that memory export be disabled. However, "
-                           "it was enabled when this network was optimized.");
-        }
-        throw InvalidArgumentException(message);
-    }
-} // anonymous
-
 std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                                 std::string& errorMessage,
                                                                 const INetworkProperties& networkProperties,
@@ -217,11 +136,6 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
 
     profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
 
-    // We need to check that the memory sources match up with the values of import and export specified during the
-    // optimize phase. If they don't this will throw an exception.
-    ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork.get()->pOptimizedNetworkImpl->GetModelOptions(),
-                                         m_NetworkProperties);
-
     //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
    //Because workload creation can modify some of the handlers,
@@ -1525,7 +1439,7 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
 
         ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-        if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+        if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
         {
             throw MemoryImportException(
                 fmt::format("ImportInputs: Memory Import failed, backend: "
                             "{} does not support importing from source {}"
                             , factoryId, forceImportMemorySource));
             std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
                                                            inputTensor.second.GetMemoryArea());
 
-        if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
+        if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
         {
             importedInputs.push_back(m_CurImportedInputId++);
             passThroughTensorHandle->Unmap();
@@ -1650,14 +1564,14 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
 
         ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
 
-        if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+        if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
         {
             throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
                                                     "{} does not support importing from source {}"
-                                                    , factoryId, forceImportMemorySource));
+                                                    , factoryId, m_NetworkProperties.m_OutputSource));
         }
 
-        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+        if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
         {
             importedOutputs.push_back(m_CurImportedOutputId++);
         }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9520c1399e..f2ba94f597 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
 ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
                                                     OutputSlot& outputSlot,
                                                     TensorHandleFactoryRegistry& registry,
-                                                    bool exportEnabled)
+                                                    bool importEnabled)
 {
     // First ensure the from backends can support the TensorHandeAPI
     Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
     std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
     for (auto&& pref : srcPrefs)
     {
-        if (exportEnabled)
+        if (importEnabled)
         {
             ITensorHandleFactory* factory = registry.GetFactory(pref);
             if (outputConnection)
@@ -1602,13 +1602,12 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
-                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -1633,7 +1632,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                     slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
                     break;
                 default:
-                    slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
+                    slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
                     break;
             }
             outputSlot.SetTensorHandleFactory(slotOption);
@@ -1697,15 +1696,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
 
     std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
 
-    // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
-    // The mechanism to do that is to add model options to the optimized network.
-    armnn::BackendOptions importExport("Global",
-                                       {{"ImportEnabled", options.m_ImportEnabled},
-                                        {"ExportEnabled", options.m_ExportEnabled}});
-    ModelOptions optimizedOptions(options.m_ModelOptions);
-    optimizedOptions.push_back(importExport);
-
-    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
+    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                        &IOptimizedNetwork::Destroy);
 
     IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1828,9 +1819,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                                                            backends,
                                                            tensorHandleFactoryRegistry,
                                                            options.m_ImportEnabled,
-                                                           options.m_ExportEnabled,
                                                            messages);
-
     if (strategyResult.m_Error)
     {
         // Failed to apply the backend-specific optimizations
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2d34cfc3e2..6c7c2f5c7e 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,7 +300,6 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
                                               bool importEnabled,
-                                              bool exportEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index f5dfadf948..376cdbc000 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@ public:
     armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
 
     std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
-                                              MemorySource forceImportMemorySource);
+                                              MemorySource forceImportMemorySource = MemorySource::Undefined);
     std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
-                                                MemorySource forceImportMemorySource);
+                                                MemorySource forceImportMemorySource = MemorySource::Undefined);
 
     void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
     void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 59f65541b8..3cbe8848df 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("RuntimePreImportInputs")
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     std::string er;
 
-    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -107,7 +107,7 @@ TEST_CASE("RuntimePreImportInputs")
     ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
     Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
 
-    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
+    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
     CHECK(importedInputVec1.size() == 1);
     CHECK(importedInputVec1[0] == 0);
 
@@ -118,7 +118,7 @@ TEST_CASE("RuntimePreImportInputs")
         CHECK(val == 30);
     }
 
-    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
     CHECK(importedInputVec2.size() == 1);
     CHECK(importedInputVec2[0] == 1);
 
@@ -146,7 +146,7 @@ TEST_CASE("RuntimePreImportInputs")
     // Incorrect layer binding id and ImportedInputId
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
                     armnn::InvalidArgumentException);
-    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
     CHECK(importedInputVec3[0] == 2);
     // Too many ImportedInputIds
     CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
                     armnn::InvalidArgumentException);
@@ -175,7 +175,6 @@ TEST_CASE("RuntimePreImportInputs")
     // Trying to delete unknown pre-imported tensor
     CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
 }
-
 TEST_CASE("RuntimePreImportOutputs")
 {
     armnn::IRuntime::CreationOptions options;
@@ -217,7 +216,7 @@ TEST_CASE("RuntimePreImportOutputs")
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     std::string er;
 
-    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+    armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
     runtime->LoadNetwork(networkId,
                          Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                          er,
@@ -258,7 +257,7 @@ TEST_CASE("RuntimePreImportOutputs")
     runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
     testOutputs();
 
-    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
+    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
     CHECK(importedOutputVec.size() == 2);
     CHECK(importedOutputVec[0] == 0);
     CHECK(importedOutputVec[1] == 1);
@@ -272,7 +271,7 @@ TEST_CASE("RuntimePreImportOutputs")
     runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
     testOutputs();
 
-    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
+    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
     CHECK(importedInputVec.size() == 2);
     CHECK(importedInputVec[0] == 0);
     CHECK(importedInputVec[1] == 1);
@@ -1294,176 +1293,4 @@ TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
 }
 
-TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
-{
-    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
-    // that network but specify that the import memory source is Malloc.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
-{
-    // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
-    // that network but specify that the export memory source as Malloc.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
-{
-    // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
-    // that network but specify that the import memory source is Undefined.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = false;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
-{
-    // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
-    // that network but specify that the export memory source is Undefined.
-
-    armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-    armnn::NetworkId networkId = 1;
-    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
-    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
-    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
-    auto addLayer = testNetwork->AddAdditionLayer("add layer");
-    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
-    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
-    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
-    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
-    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
-    OptimizerOptions optimizedOptions;
-    // Hard set import and export to off.
-    optimizedOptions.m_ImportEnabled = false;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
-    CHECK(optNet);
-
-    std::string er;
-    // Load the network passing an import memory source.
-    armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
-    // There should be an InvalidArgumentException.
-    runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
-    CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
 }
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 2ea3c2abf1..c591fffa43 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@ TEST_CASE("TensorHandleSelectionStrategy")
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 9c85ffcfc3..c69a4b5f91 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@ TEST_CASE("Neon_Cl_DirectCompatibility_Test")
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     CHECK(result.m_Error == false);
     CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23ca3..77901df444 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,9 +204,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -271,10 +269,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -345,10 +340,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     CHECK(optNet);
 
     // Loads it into the runtime.
@@ -432,9 +424,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -524,9 +514,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
     // optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -613,10 +601,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
     input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
 
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     INFO("Load Network");
     // Load it into the runtime. It should pass.
@@ -709,10 +694,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<BackendId>
     activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
 
     // Optimize the network
-    OptimizerOptions optimizedOptions;
-    optimizedOptions.m_ImportEnabled = true;
-    optimizedOptions.m_ExportEnabled = true;
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865def71..bcea0610db 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@ TEST_CASE("OptimizeNetworkCopy")
     std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
     armnn::ModelOptions modelOptions;
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
     std::vector<std::string> errorMessages;
 
     // optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..139e688dc2 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,7 +120,6 @@ TEST_CASE("ClCustomAllocatorTest")
     // Optimise ArmNN network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
    armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
     CHECK(optNet);
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 51a983a681..6ac94337ba 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,7 +50,6 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -331,7 +330,6 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9a075d2b7d..20537b3c81 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,7 +142,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -339,7 +338,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -472,7 +470,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -616,7 +613,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -751,7 +747,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -901,7 +896,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
@@ -1123,7 +1117,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
     // Optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = false;
-    optOptions.m_ExportEnabled = false;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
     CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..cf17eae208 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 8e0e0ab99b..d2de843fd9 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,7 +60,6 @@ TEST_CASE("FallbackImportToCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -204,7 +203,6 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -340,7 +338,6 @@ TEST_CASE("FallbackImportFromCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -485,7 +482,6 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -750,7 +746,6 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
@@ -1042,7 +1037,6 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
     // optimize the network
     OptimizerOptions optOptions;
     optOptions.m_ImportEnabled = true;
-    optOptions.m_ExportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bfd07..9b448b270d 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@ TEST_CASE("FastMathEnabledTestOnCpuAcc")
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+    CHECK(modelOptionsOut.size() == 1);
     CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
     CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
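For readers tracing the revert: the ValidateSourcesMatchOptimizedNetwork() check removed from
LoadedNetwork.cpp above enforced a single invariant, namely that a memory source other than
MemorySource::Undefined supplied at LoadNetwork() time is only acceptable when the matching
ImportEnabled/ExportEnabled option was set at Optimize() time, and that an Undefined source is
only acceptable when it was not. The C++ sketch below restates that rule so it can be compiled
standalone; the MemorySource enum here is a local stand-in mirroring armnn::MemorySource, and
SourceMatchesOptimizeFlag is an illustrative name, not an Arm NN API.

    #include <cassert>

    // Local stand-in for armnn::MemorySource; only the values used in this patch.
    enum class MemorySource { Undefined, Malloc, DmaBuf };

    // A non-Undefined source at load time is valid only when the matching
    // ImportEnabled/ExportEnabled flag was set at optimize time, and an
    // Undefined source is valid only when it was not.
    bool SourceMatchesOptimizeFlag(MemorySource source, bool flagEnabled)
    {
        return (source != MemorySource::Undefined) == flagEnabled;
    }

    int main()
    {
        // The four removed RuntimeTests cases covered exactly these pairs,
        // once for the input/import side and once for the output/export side.
        assert(!SourceMatchesOptimizeFlag(MemorySource::Malloc,    false)); // source given, flag off
        assert(!SourceMatchesOptimizeFlag(MemorySource::Undefined, true));  // flag on, no source given
        assert(SourceMatchesOptimizeFlag(MemorySource::Malloc,     true));
        assert(SourceMatchesOptimizeFlag(MemorySource::Undefined,  false));
        return 0;
    }

After the revert no equivalent check runs at load time: as the Runtime.hpp and LoadedNetwork.cpp
hunks show, ImportInputs()/ImportOutputs() consult the INetworkProperties sources again and
forceImportMemorySource regains its MemorySource::Undefined default, so a mismatch surfaces later
(for example as a MemoryImportException) rather than as an InvalidArgumentException at LoadNetwork().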