about summary refs log tree commit diff
path: root/src/backends/cl/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/cl/test')
-rw-r--r--src/backends/cl/test/ClContextSerializerTests.cpp10
-rw-r--r--src/backends/cl/test/ClCustomAllocatorTests.cpp12
-rw-r--r--src/backends/cl/test/ClFallbackTests.cpp16
-rw-r--r--src/backends/cl/test/ClImportTensorHandleTests.cpp44
-rw-r--r--src/backends/cl/test/ClOptimizedNetworkTests.cpp10
5 files changed, 46 insertions(+), 46 deletions(-)
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 862ed2ecab..81a66145d9 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -88,14 +88,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
armnn::INetworkPtr net2 = CreateNetwork();
// Add specific optimizerOptions to each network.
- armnn::OptimizerOptions optimizerOptions1;
- armnn::OptimizerOptions optimizerOptions2;
+ armnn::OptimizerOptionsOpaque optimizerOptions1;
+ armnn::OptimizerOptionsOpaque optimizerOptions2;
armnn::BackendOptions modelOptions1("GpuAcc",
{{"SaveCachedNetwork", true}, {"CachedNetworkFilePath", filePathString}});
armnn::BackendOptions modelOptions2("GpuAcc",
{{"SaveCachedNetwork", false}, {"CachedNetworkFilePath", filePathString}});
- optimizerOptions1.m_ModelOptions.push_back(modelOptions1);
- optimizerOptions2.m_ModelOptions.push_back(modelOptions2);
+ optimizerOptions1.AddModelOption(modelOptions1);
+ optimizerOptions2.AddModelOption(modelOptions2);
armnn::IOptimizedNetworkPtr optNet1 = armnn::Optimize(
*net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..1cc2c4c95a 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -118,9 +118,9 @@ TEST_CASE("ClCustomAllocatorTest")
IRuntimePtr run = IRuntime::Create(options);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -188,8 +188,8 @@ TEST_CASE("ClCustomAllocatorCpuAccNegativeTest")
INetworkPtr myNetwork = CreateTestNetwork(inputTensorInfo);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet(nullptr, nullptr);
std::vector<std::string> errMessages;
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 9443116c92..acba449e18 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -48,9 +48,9 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -196,7 +196,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -329,9 +329,9 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -488,7 +488,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 1198cade61..39619e6421 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -140,9 +140,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
activation->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -337,9 +337,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -473,9 +473,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -621,9 +621,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -760,9 +760,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -912,9 +912,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -1138,9 +1138,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..3d4341df18 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -86,8 +86,8 @@ TEST_CASE("FP16TurboModeTestOnGpuAcc")
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_ReduceFp32ToFp16 = true;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetReduceFp32ToFp16(true);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -119,9 +119,9 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);