Diffstat (limited to 'src/backends/backendsCommon/test/EndToEndTestImpl.hpp')
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp  32
1 file changed, 7 insertions(+), 25 deletions(-)
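Note: the hunks below make the same change in seven end-to-end test helpers: the explicit OptimizerOptions object (with m_ImportEnabled and, in most cases, m_ExportEnabled set) is dropped, and Optimize() is called with its default options. The following is a minimal sketch of the before/after call pattern, assuming the ArmNN public API used in these tests (armnn::Optimize, armnn::OptimizerOptions, IRuntime); the two helper function names are illustrative only, not part of the patch.

// Sketch of the two Optimize() call patterns seen in this diff.
// Assumes the ArmNN public API; helper names are hypothetical.
#include <armnn/ArmNN.hpp>
#include <vector>

// Pattern removed by this diff: memory import/export requested at optimize time.
armnn::IOptimizedNetworkPtr OptimizeWithImportExport(const armnn::INetwork& net,
                                                     const std::vector<armnn::BackendId>& backends,
                                                     const armnn::IRuntimePtr& runtime)
{
    armnn::OptimizerOptions options;
    options.m_ImportEnabled = true;   // request memory import for input tensors
    options.m_ExportEnabled = true;   // request memory export for output tensors
    return armnn::Optimize(net, backends, runtime->GetDeviceSpec(), options);
}

// Pattern introduced by this diff: rely on the default OptimizerOptions.
armnn::IOptimizedNetworkPtr OptimizeWithDefaults(const armnn::INetwork& net,
                                                 const std::vector<armnn::BackendId>& backends,
                                                 const armnn::IRuntimePtr& runtime)
{
    return armnn::Optimize(net, backends, runtime->GetDeviceSpec());
}

How the import/export behaviour is exercised after this change (for example, at network load time) is not visible in these hunks.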
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23ca3..77901df444 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,9 +204,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -271,10 +269,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -345,10 +340,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -432,9 +424,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -524,9 +514,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -613,10 +601,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -709,10 +694,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
// Loads it into the runtime.
NetworkId netId;