-rw-r--r--   arm_compute/graph2/GraphContext.h              25
-rw-r--r--   arm_compute/graph2/Types.h                      9
-rw-r--r--   arm_compute/graph2/backends/Utils.h             2
-rw-r--r--   arm_compute/graph2/frontend/Stream.h           10
-rw-r--r--   arm_compute/graph2/frontend/Types.h             1
-rw-r--r--   examples/graph_alexnet.cpp                      7
-rw-r--r--   examples/graph_googlenet.cpp                   13
-rw-r--r--   examples/graph_inception_v3.cpp                11
-rw-r--r--   examples/graph_inception_v4.cpp                11
-rw-r--r--   examples/graph_lenet.cpp                       11
-rw-r--r--   examples/graph_mobilenet.cpp                    7
-rw-r--r--   examples/graph_resnet50.cpp                    11
-rw-r--r--   examples/graph_squeezenet.cpp                  11
-rw-r--r--   examples/graph_squeezenet_v1_1.cpp             11
-rw-r--r--   examples/graph_vgg16.cpp                       11
-rw-r--r--   examples/graph_vgg19.cpp                       13
-rw-r--r--   src/graph2/GraphContext.cpp                    20
-rw-r--r--   src/graph2/backends/CL/CLDeviceBackend.cpp      2
-rw-r--r--   src/graph2/backends/NEON/NEDeviceBackend.cpp    5
-rw-r--r--   src/graph2/frontend/Stream.cpp                  5
20 files changed, 99 insertions(+), 97 deletions(-)
diff --git a/arm_compute/graph2/GraphContext.h b/arm_compute/graph2/GraphContext.h
index 72ed96e7a0..f38e25dd61 100644
--- a/arm_compute/graph2/GraphContext.h
+++ b/arm_compute/graph2/GraphContext.h
@@ -56,26 +56,18 @@ public:
GraphContext &operator=(const GraphContext &) = delete;
/** Default move assignment operator */
GraphContext &operator=(GraphContext &&) = default;
- /** Enables tuning
+ /** Graph configuration accessor
*
- * @param[in] enable_tuning Enables tuning if true
- */
- void enable_tuning(bool enable_tuning);
- /** Checks if tuning is enabled
- *
- * @return True if tuning is enabled else false
- */
- bool is_tuning_enabled() const;
- /** Enables memory management
+ * @note Every alteration has to be done before graph finalization
*
- * @param[in] enable_mm Enables mm if true
+ * @return The graph configuration
*/
- void enable_memory_managenent(bool enable_mm);
- /** Checks if memory management is enabled
+ const GraphConfig &config() const;
+ /** Sets graph configuration
*
- * @return True if memory management is enabled else false
+ * @param[in] config Configuration to use
*/
- bool is_memory_management_enabled();
+ void set_config(const GraphConfig &config);
/** Inserts a memory manager context
*
* @param[in] memory_ctx Memory manage context
@@ -94,8 +86,7 @@ public:
void finalize();
private:
- bool _tunable; /**< Specifies if the Graph should use a tunable object */
- bool _memory_managed; /**< Specifies if the Graph should use a memory managed */
+ GraphConfig _config; /**< Graph configuration */
std::map<Target, MemoryManagerContext> _memory_managers; /**< Memory managers for each target */
};
} // namespace graph2
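A minimal sketch of how a caller is expected to use the new accessor pair in place of the removed setters. The helper function and its call site are hypothetical; only GraphContext, config() and set_config() come from the header above, and GraphConfig is added in Types.h in the next hunk.

#include "arm_compute/graph2/GraphContext.h"
#include "arm_compute/graph2/Types.h"

using namespace arm_compute::graph2;

// Hypothetical helper: build a configuration and hand it to the context
// before the graph is finalized, as the @note on config() requires.
void configure_context(GraphContext &ctx)
{
    GraphConfig config;
    config.use_function_memory_manager = true; // replaces enable_memory_managenent(true)
    config.use_tuner                   = true; // replaces enable_tuning(true)
    ctx.set_config(config);

    // Backends later read the same settings back through the const accessor
    const bool tuner_enabled = ctx.config().use_tuner;
    (void)tuner_enabled;
}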
diff --git a/arm_compute/graph2/Types.h b/arm_compute/graph2/Types.h
index 4cbfc7267b..b619cf1673 100644
--- a/arm_compute/graph2/Types.h
+++ b/arm_compute/graph2/Types.h
@@ -71,6 +71,15 @@ constexpr EdgeID EmptyEdgeID = std::numeric_limits<EdgeID>::max();
// Forward declarations
class TensorDescriptor;
+/** Graph configuration structure */
+struct GraphConfig
+{
+ bool use_function_memory_manager{ false }; /**< Use a memory manager to manage per-function auxiliary memory */
+ bool use_transition_memory_manager{ false }; /**< Use a memory manager to manage transition buffer memory */
+ bool use_tuner{ false }; /**< Use a tuner in tunable backends */
+ unsigned int num_threads{ 0 }; /**< Number of threads to use (thread-capable backends); if 0, the backend will auto-initialize */
+};
+
/**< Data layout format */
enum class DataLayout
{
diff --git a/arm_compute/graph2/backends/Utils.h b/arm_compute/graph2/backends/Utils.h
index cc6f5163f2..bba75757eb 100644
--- a/arm_compute/graph2/backends/Utils.h
+++ b/arm_compute/graph2/backends/Utils.h
@@ -87,7 +87,7 @@ inline bool is_in_place_operation(void *input, void *output)
*/
inline std::shared_ptr<IMemoryManager> get_memory_manager(GraphContext &ctx, Target target)
{
- bool enabled = ctx.is_memory_management_enabled() && (ctx.memory_management_ctx(target) != nullptr);
+ bool enabled = ctx.config().use_function_memory_manager && (ctx.memory_management_ctx(target) != nullptr);
return enabled ? ctx.memory_management_ctx(target)->mm : nullptr;
}
} // namespace backends
diff --git a/arm_compute/graph2/frontend/Stream.h b/arm_compute/graph2/frontend/Stream.h
index 6100975958..bfefe12225 100644
--- a/arm_compute/graph2/frontend/Stream.h
+++ b/arm_compute/graph2/frontend/Stream.h
@@ -61,14 +61,10 @@ public:
Stream &operator=(Stream &&) = default;
/** Finalizes the stream for an execution target
*
- * @note enable_tuning only works if the target is OpenCL.
- * @note tuning increases the execution time of first run of the graph
- *
- * @param[in] target Execution target
- * @param[in] enable_tuning (Optional) Enables the tuning interface. Defaults to false
- * @param[in] enable_memory_management (Optional) Enables the memory management interface. Defaults to false
+ * @param[in] target Execution target
+ * @param[in] config (Optional) Graph configuration to use
*/
- void finalize(Target target, bool enable_tuning = false, bool enable_memory_management = false);
+ void finalize(Target target, const GraphConfig &config);
/** Executes the stream **/
void run();
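With the new signature, every frontend caller now builds a GraphConfig and passes it explicitly. A sketch of the updated call site follows; the stream name and layer setup are placeholders, and the pattern matches the example programs further down.

using namespace arm_compute::graph2::frontend;

Stream graph(0, "example");

// ... layers are added to the stream here ...

GraphConfig config;
config.use_function_memory_manager = true;  // old enable_memory_management flag
config.use_tuner                   = true;  // old enable_tuning flag (CL targets only)
graph.finalize(Target::CL, config);         // was: graph.finalize(target, true, true)
graph.run();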
diff --git a/arm_compute/graph2/frontend/Types.h b/arm_compute/graph2/frontend/Types.h
index 234b998126..d433d1547b 100644
--- a/arm_compute/graph2/frontend/Types.h
+++ b/arm_compute/graph2/frontend/Types.h
@@ -47,6 +47,7 @@ using graph2::ConvolutionMethod;
using graph2::DepthwiseConvolutionMethod;
using graph2::TensorDescriptor;
using graph2::DimensionRoundingType;
+using graph2::GraphConfig;
/** Branch layer merging method */
enum class BranchMergeMethod
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 6ba3ebc7ae..885db337e9 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -55,8 +55,6 @@ public:
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
// TODO (geopin01) : Get GPU target somehow and set gemm also for midgard ?
const bool is_gemm_convolution5x5 = (target_hint == Target::NEON);
@@ -163,7 +161,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index e97f3acdfd..d6e76fdced 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -52,11 +52,9 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
// Parse arguments
if(argc < 2)
@@ -136,7 +134,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index 73a4450a4f..5f049d0f4a 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -51,10 +51,8 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
@@ -192,7 +190,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index 88073b7efb..847c5b8250 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -55,10 +55,8 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
// Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
@@ -162,7 +160,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
#else /* __aarch64__ */
using namespace arm_compute;
ARM_COMPUTE_UNUSED(argc);
diff --git a/examples/graph_lenet.cpp b/examples/graph_lenet.cpp
index e4b8effe5d..3803da9b83 100644
--- a/examples/graph_lenet.cpp
+++ b/examples/graph_lenet.cpp
@@ -47,10 +47,8 @@ public:
unsigned int batches = 4; /** Number of batches */
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
// Parse arguments
if(argc < 2)
@@ -106,7 +104,10 @@ public:
<< OutputLayer(get_output_accessor(""));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index 4d01055c50..bd25b927fe 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -54,8 +54,6 @@ public:
Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = ConvolutionMethod::GEMM;
DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::OPTIMIZED_3x3;
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
// Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
@@ -150,7 +148,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index a7d7abc761..ec447de66f 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -52,10 +52,8 @@ public:
false /* Do not convert to BGR */);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
@@ -125,7 +123,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index 92e6a38fcd..ddbe6b43ce 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -53,10 +53,8 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
@@ -173,7 +171,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index 540784e4cd..faab79fcf1 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -56,10 +56,8 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
ConvolutionMethod convolution_hint = (target_hint == Target::CL) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
@@ -177,7 +175,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index 516b7b18f0..23742ed771 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -65,10 +65,8 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
// Check if we can use GEMM-based convolutions evaluating if the platform has at least 1.8 GB of available memory
const size_t memory_required = 1932735283L;
@@ -231,7 +229,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 55502e0e00..f282b90630 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -51,11 +51,9 @@ public:
std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
// Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
- const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
- Target target_hint = set_target_hint2(target);
- ConvolutionMethod convolution_hint = ConvolutionMethod::DIRECT;
- bool enable_tuning = (target == 2);
- bool enable_memory_management = true;
+ const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+ Target target_hint = set_target_hint2(target);
+ ConvolutionMethod convolution_hint = ConvolutionMethod::DIRECT;
// Parse arguments
if(argc < 2)
@@ -221,7 +219,10 @@ public:
<< OutputLayer(get_output_accessor(label, 5));
// Finalize graph
- graph.finalize(target_hint, enable_tuning, enable_memory_management);
+ GraphConfig config;
+ config.use_function_memory_manager = true;
+ config.use_tuner = (target == 2);
+ graph.finalize(target_hint, config);
}
void do_run() override
{
diff --git a/src/graph2/GraphContext.cpp b/src/graph2/GraphContext.cpp
index 88fc5216a4..08a7b68dce 100644
--- a/src/graph2/GraphContext.cpp
+++ b/src/graph2/GraphContext.cpp
@@ -29,28 +29,18 @@ namespace arm_compute
namespace graph2
{
GraphContext::GraphContext()
- : _tunable(false), _memory_managed(false), _memory_managers()
+ : _config(), _memory_managers()
{
}
-void GraphContext::enable_tuning(bool enable_tuning)
+const GraphConfig &GraphContext::config() const
{
- _tunable = enable_tuning;
+ return _config;
}
-bool GraphContext::is_tuning_enabled() const
+void GraphContext::set_config(const GraphConfig &config)
{
- return _tunable;
-}
-
-void GraphContext::enable_memory_managenent(bool enable_mm)
-{
- _memory_managed = enable_mm;
-}
-
-bool GraphContext::is_memory_management_enabled()
-{
- return _memory_managed;
+ _config = config;
}
bool GraphContext::insert_memory_management_ctx(MemoryManagerContext &&memory_ctx)
diff --git a/src/graph2/backends/CL/CLDeviceBackend.cpp b/src/graph2/backends/CL/CLDeviceBackend.cpp
index 6d2d4f9b1a..71566d2f1f 100644
--- a/src/graph2/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph2/backends/CL/CLDeviceBackend.cpp
@@ -101,7 +101,7 @@ void CLDeviceBackend::initialize_backend()
void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
{
// Setup tuner
- set_kernel_tuning(ctx.is_tuning_enabled());
+ set_kernel_tuning(ctx.config().use_tuner);
// Setup a management backend
if(ctx.memory_management_ctx(Target::CL) == nullptr)
diff --git a/src/graph2/backends/NEON/NEDeviceBackend.cpp b/src/graph2/backends/NEON/NEDeviceBackend.cpp
index 9010c5d802..6cb507b4f1 100644
--- a/src/graph2/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph2/backends/NEON/NEDeviceBackend.cpp
@@ -40,6 +40,7 @@
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/OffsetLifetimeManager.h"
#include "arm_compute/runtime/PoolManager.h"
+#include "arm_compute/runtime/Scheduler.h"
#include "support/ToolchainSupport.h"
@@ -63,6 +64,10 @@ void NEDeviceBackend::initialize_backend()
void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
{
+ // Set number of threads
+ Scheduler::get().set_num_threads(ctx.config().num_threads);
+
+ // Create function level memory manager
if(ctx.memory_management_ctx(Target::NEON) == nullptr)
{
MemoryManagerContext mm_ctx;
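A short sketch of how the new num_threads field is meant to reach the NEON backend; ctx is a GraphContext set up elsewhere, and the behaviour of 0 is assumed from the field's doc comment (thread-count selection is left to the scheduler itself).

GraphConfig config;
config.num_threads = 4;   // pin the NEON backend to four worker threads;
                          // 0 would let the scheduler auto-initialize instead
ctx.set_config(config);

// NEDeviceBackend::setup_backend_context() then forwards the value:
//   Scheduler::get().set_num_threads(ctx.config().num_threads);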
diff --git a/src/graph2/frontend/Stream.cpp b/src/graph2/frontend/Stream.cpp
index 076b9ac11f..3386d6547f 100644
--- a/src/graph2/frontend/Stream.cpp
+++ b/src/graph2/frontend/Stream.cpp
@@ -37,11 +37,10 @@ Stream::Stream(size_t id, std::string name)
{
}
-void Stream::finalize(Target target, bool enable_tuning, bool enable_memory_management)
+void Stream::finalize(Target target, const GraphConfig &config)
{
PassManager pm = create_default_pass_manager();
- _ctx.enable_tuning(enable_tuning);
- _ctx.enable_memory_managenent(enable_memory_management);
+ _ctx.set_config(config);
_manager.finalize_graph(_g, _ctx, pm, target);
}