about summary refs log tree commit diff
path: root/src/backends/cl/test/ClImportTensorHandleTests.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/cl/test/ClImportTensorHandleTests.cpp')
-rw-r--r-- src/backends/cl/test/ClImportTensorHandleTests.cpp | 44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 1198cade61..39619e6421 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -140,9 +140,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
activation->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -337,9 +337,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -473,9 +473,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -621,9 +621,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -760,9 +760,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -912,9 +912,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -1138,9 +1138,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);