diff options
author | John Mcloughlin <john.mcloughlin@arm.com> | 2023-03-24 12:07:25 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2023-04-12 18:28:23 +0100 |
commit | c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch) | |
tree | 931f1403589c34fd2de6b94d95e9e172a92424fe /src/backends/cl/test/ClImportTensorHandleTests.cpp | |
parent | ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff) | |
download | armnn-c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09.tar.gz |
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
Diffstat (limited to 'src/backends/cl/test/ClImportTensorHandleTests.cpp')
-rw-r--r-- | src/backends/cl/test/ClImportTensorHandleTests.cpp | 44 |
1 file changed, 22 insertions, 22 deletions
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp index 1198cade61..39619e6421 100644 --- a/src/backends/cl/test/ClImportTensorHandleTests.cpp +++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -140,9 +140,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd") activation->GetOutputSlot(0).SetTensorInfo(tensorInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = true; - optOptions.m_ExportEnabled = true; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(true); + optOptions.SetExportEnabled(true); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -337,9 +337,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd") convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -473,9 +473,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); 
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -621,9 +621,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -760,9 +760,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16 convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -912,9 +912,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); @@ -1138,9 +1138,9 @@ 
TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); // Optimize the network - OptimizerOptions optOptions; - optOptions.m_ImportEnabled = false; - optOptions.m_ExportEnabled = false; + OptimizerOptionsOpaque optOptions; + optOptions.SetImportEnabled(false); + optOptions.SetExportEnabled(false); std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc}; IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions); CHECK(optNet); |