about summary refs log tree commit diff
path: root/src/backends
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends')
-rw-r--r-- src/backends/backendsCommon/test/EndToEndTestImpl.hpp    | 36
-rw-r--r-- src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp |  6
-rw-r--r-- src/backends/backendsCommon/test/OptimizedNetworkTests.cpp |  8
-rw-r--r-- src/backends/cl/test/ClContextSerializerTests.cpp       | 10
-rw-r--r-- src/backends/cl/test/ClCustomAllocatorTests.cpp         | 12
-rw-r--r-- src/backends/cl/test/ClFallbackTests.cpp                | 16
-rw-r--r-- src/backends/cl/test/ClImportTensorHandleTests.cpp      | 44
-rw-r--r-- src/backends/cl/test/ClOptimizedNetworkTests.cpp        | 10
-rw-r--r-- src/backends/neon/test/NeonFallbackTests.cpp            | 40
-rw-r--r-- src/backends/neon/test/NeonOptimizedNetworkTests.cpp    | 15
-rw-r--r-- src/backends/reference/test/RefOptimizedNetworkTests.cpp |  4
11 files changed, 102 insertions, 99 deletions
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 73cef16aad..bd5466ac04 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,8 +210,8 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -278,9 +278,9 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -353,9 +353,9 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -441,8 +441,8 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -531,8 +531,8 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -620,9 +620,9 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -714,9 +714,9 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
// Loads it into the runtime.
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 226e2b3364..c5f9869298 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -157,8 +157,8 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// optimize the network
- armnn::OptimizerOptions optOptions;
- optOptions.m_ProfilingEnabled = true;
+ armnn::OptimizerOptionsOpaque optOptions;
+ optOptions.SetProfilingEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
if(!optNet)
{
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 5e619df8dd..ce1eea4194 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -213,7 +213,8 @@ TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDevice")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(),
+ armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -421,7 +422,8 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
armnn::ModelOptions modelOptions;
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+ false, modelOptions, false);
std::vector<std::string> errorMessages;
// optimize the network.
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 862ed2ecab..81a66145d9 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -88,14 +88,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
armnn::INetworkPtr net2 = CreateNetwork();
// Add specific optimizerOptions to each network.
- armnn::OptimizerOptions optimizerOptions1;
- armnn::OptimizerOptions optimizerOptions2;
+ armnn::OptimizerOptionsOpaque optimizerOptions1;
+ armnn::OptimizerOptionsOpaque optimizerOptions2;
armnn::BackendOptions modelOptions1("GpuAcc",
{{"SaveCachedNetwork", true}, {"CachedNetworkFilePath", filePathString}});
armnn::BackendOptions modelOptions2("GpuAcc",
{{"SaveCachedNetwork", false}, {"CachedNetworkFilePath", filePathString}});
- optimizerOptions1.m_ModelOptions.push_back(modelOptions1);
- optimizerOptions2.m_ModelOptions.push_back(modelOptions2);
+ optimizerOptions1.AddModelOption(modelOptions1);
+ optimizerOptions2.AddModelOption(modelOptions2);
armnn::IOptimizedNetworkPtr optNet1 = armnn::Optimize(
*net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..1cc2c4c95a 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -118,9 +118,9 @@ TEST_CASE("ClCustomAllocatorTest")
IRuntimePtr run = IRuntime::Create(options);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -188,8 +188,8 @@ TEST_CASE("ClCustomAllocatorCpuAccNegativeTest")
INetworkPtr myNetwork = CreateTestNetwork(inputTensorInfo);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet(nullptr, nullptr);
std::vector<std::string> errMessages;
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 9443116c92..acba449e18 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -48,9 +48,9 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -196,7 +196,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -329,9 +329,9 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -488,7 +488,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 1198cade61..39619e6421 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -140,9 +140,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
activation->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -337,9 +337,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -473,9 +473,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -621,9 +621,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -760,9 +760,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -912,9 +912,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -1138,9 +1138,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..3d4341df18 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -86,8 +86,8 @@ TEST_CASE("FP16TurboModeTestOnGpuAcc")
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_ReduceFp32ToFp16 = true;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetReduceFp32ToFp16(true);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -119,9 +119,9 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 40df2dc315..eeb8107d49 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -58,9 +58,9 @@ TEST_CASE("FallbackImportToCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -202,9 +202,9 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -338,9 +338,9 @@ TEST_CASE("FallbackImportFromCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -483,9 +483,9 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -748,9 +748,9 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -901,7 +901,7 @@ TEST_CASE("NeonImportDisabledFallbackToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -1040,9 +1040,9 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -1204,7 +1204,7 @@ TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bfd07..4b700b034c 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,7 +71,8 @@ TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(),
+ armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException& e)
@@ -95,9 +96,9 @@ TEST_CASE("FastMathEnabledTestOnCpuAcc")
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -127,16 +128,16 @@ TEST_CASE("NumberOfThreadsTestOnCpuAcc")
unsigned int numberOfThreads = 2;
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
CHECK(optimizedNet);
std::unique_ptr<armnn::Graph> graphPtr;
- armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
+ armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.GetModelOptions());
auto modelOptionsOut = impl.GetModelOptions();
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 7e8064fc76..b4a135ffba 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -187,8 +187,8 @@ TEST_CASE("DebugTestOnCpuRef")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_Debug = true;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetDebugEnabled(true);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);