Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--   src/backends/backendsCommon/test/EndToEndTestImpl.hpp      | 36
-rw-r--r--   src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp   |  6
-rw-r--r--   src/backends/backendsCommon/test/OptimizedNetworkTests.cpp |  8
3 files changed, 26 insertions, 24 deletions
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 73cef16aad..bd5466ac04 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,8 +210,8 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -278,9 +278,9 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -353,9 +353,9 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -441,8 +441,8 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -531,8 +531,8 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -620,9 +620,9 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -714,9 +714,9 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
// Loads it into the runtime.
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 226e2b3364..c5f9869298 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -157,8 +157,8 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// optimize the network
- armnn::OptimizerOptions optOptions;
- optOptions.m_ProfilingEnabled = true;
+ armnn::OptimizerOptionsOpaque optOptions;
+ optOptions.SetProfilingEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
if(!optNet)
{
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 5e619df8dd..ce1eea4194 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -213,7 +213,8 @@ TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDevice")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(),
+ armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -421,7 +422,8 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
armnn::ModelOptions modelOptions;
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+ false, modelOptions, false);
std::vector<std::string> errorMessages;
// optimize the network.
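
Note: the pattern applied throughout this change is the move from the public-member armnn::OptimizerOptions struct (m_ImportEnabled, m_ExportEnabled, m_ProfilingEnabled) to the setter-based armnn::OptimizerOptionsOpaque class. The following is a minimal sketch of the new-style call, based only on the usage visible in the hunks above; the wrapper function name is illustrative and error handling is omitted, as in the test fixtures.

#include <armnn/ArmNN.hpp>

armnn::IOptimizedNetworkPtr OptimizeWithImportExport(armnn::INetwork& net,
                                                     armnn::IRuntime& runtime,
                                                     const std::vector<armnn::BackendId>& backends)
{
    // Old style (removed by this change):
    //   armnn::OptimizerOptions options;
    //   options.m_ImportEnabled    = true;
    //   options.m_ExportEnabled    = true;
    //   options.m_ProfilingEnabled = true;
    armnn::OptimizerOptionsOpaque options;  // opaque options object
    options.SetImportEnabled(true);         // replaces m_ImportEnabled
    options.SetExportEnabled(true);         // replaces m_ExportEnabled
    options.SetProfilingEnabled(true);      // replaces m_ProfilingEnabled

    // A constructor overload mirroring the old struct's argument order also exists,
    // as used in OptimizeNetworkCopy above:
    //   armnn::OptimizerOptionsOpaque options(false, false, false, false, modelOptions, false);

    return armnn::Optimize(net, backends, runtime.GetDeviceSpec(), options);
}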