ArmNN 21.02
armnn_external_delegate.cpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "armnn_delegate.hpp"
#include <armnn/Logging.hpp>

#include <cstdlib>  // atoi
#include <cstring>  // strtok
#include <iostream>
#include <string>
#include <vector>
#include <tensorflow/lite/minimal_logging.h>
10 
11 namespace tflite
12 {
13 
14 /**
15  * This file defines two symbols that need to be exported to use the TFLite external delegate provider. This is a plugin
16  * that can be used for fast integration of delegates into benchmark tests and other tools. It allows loading of
17  * a dynamic delegate library at runtime.
18  *
19  * The external delegate also has Tensorflow Lite Python bindings. Therefore the dynamic external delegate
20  * can be directly used with Tensorflow Lite Python APIs.
21  *
22  * See tensorflow/lite/delegates/external for details or visit the tensorflow guide
23  * [here](https://www.tensorflow.org/lite/performance/implementing_delegate#option_2_leverage_external_delegate)
24  */
25 
26 extern "C"
27 {
28 std::vector<std::string> gpu_options {"gpu-tuning-level",
29  "gpu-tuning-file",
30  "gpu-kernel-profiling-enabled"};
31 
32 
/**
 * Create an ArmNN delegate plugin
 *
 * Available options:
 *
 * Option key: "backends" \n
 * Possible values: ["EthosNPU"/"GpuAcc"/"CpuAcc"/"CpuRef"] \n
 * Description: A comma separated list without whitespaces of
 *              backends which should be used for execution. Falls
 *              back to the next backend in the list if the previous one
 *              does not support an operation, e.g. "GpuAcc,CpuAcc"
 *
 * Option key: "logging-severity" \n
 * Possible values: ["trace"/"debug"/"info"/"warning"/"error"/"fatal"] \n
 * Description: Sets the logging severity level for ArmNN. Logging
 *              is turned off if this option is not provided.
 *
 * Option key: "gpu-tuning-level" \n
 * Possible values: ["0"/"1"/"2"/"3"] \n
 * Description: 0=UseOnly(default), 1=RapidTuning, 2=NormalTuning,
 *              3=ExhaustiveTuning. Requires option gpu-tuning-file.
 *              1, 2 and 3 will create a tuning file; 0 will apply the
 *              tunings from an existing file.
 *
 * Option key: "gpu-tuning-file" \n
 * Possible values: [filenameString] \n
 * Description: File name for the tuning file.
 *
 * Option key: "gpu-mlgo-tuning-file" \n
 * Possible values: [filenameString] \n
 * Description: File name for the MLGO tuning file.
 *
 * Option key: "gpu-kernel-profiling-enabled" \n
 * Possible values: ["true"/"false"] \n
 * Description: Enables GPU kernel profiling
 *
 * Option key: "reduce-fp32-to-fp16" \n
 * Possible values: ["true"/"false"] \n
 * Description: Reduce Fp32 data to Fp16 for faster processing
 *
 * Option key: "reduce-fp32-to-bf16" \n
 * Possible values: ["true"/"false"] \n
 * Description: Reduce Fp32 data to Bf16 for faster processing
 *
 * Option key: "debug-data" \n
 * Possible values: ["true"/"false"] \n
 * Description: Add debug data for easier troubleshooting
 *
 * Option key: "memory-import" \n
 * Possible values: ["true"/"false"] \n
 * Description: Enable memory import
 *
 *
 * @param[in]     options_keys   Delegate option names
 * @param[in]     options_values Delegate option values
 * @param[in]     num_options    Number of delegate options
 * @param[in,out] report_error   Error callback function
 *
 * @return An ArmNN delegate if it succeeds, else NULL
 */
TfLiteDelegate* tflite_plugin_create_delegate(char** options_keys,
                                              char** options_values,
                                              size_t num_options,
                                              void (*report_error)(const char*))
{
    // Returning null indicates an error during delegate creation, so we initialize with that
    TfLiteDelegate* delegate = nullptr;
    try
    {
        // (Initializes with CpuRef backend)
        armnnDelegate::DelegateOptions options = armnnDelegate::TfLiteArmnnDelegateOptionsDefault();
        armnn::OptimizerOptions optimizerOptions;
        for (size_t i = 0; i < num_options; ++i)
        {
            // Process backends
            if (std::string(options_keys[i]) == std::string("backends"))
            {
                // The backend option is a comma separated string of backendIDs that needs to be split
                std::vector<armnn::BackendId> backends;
                char* pch;
                pch = strtok(options_values[i], ",");
                while (pch != NULL)
                {
                    backends.push_back(pch);
                    pch = strtok(NULL, ",");
                }
                options.SetBackends(backends);
            }
            // Process logging level
            else if (std::string(options_keys[i]) == std::string("logging-severity"))
            {
                options.SetLoggingSeverity(options_values[i]);
            }
            // Process GPU backend options
            else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
            {
                armnn::BackendOptions option("GpuAcc", {{"TuningLevel", atoi(options_values[i])}});
                options.AddBackendOption(option);
            }
            else if (std::string(options_keys[i]) == std::string("gpu-mlgo-tuning-file"))
            {
                armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", std::string(options_values[i])}});
                options.AddBackendOption(option);
            }
            else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
            {
                armnn::BackendOptions option("GpuAcc", {{"TuningFile", std::string(options_values[i])}});
                options.AddBackendOption(option);
            }
            else if (std::string(options_keys[i]) == std::string("gpu-kernel-profiling-enabled"))
            {
                armnn::BackendOptions option("GpuAcc", {{"KernelProfilingEnabled", (*options_values[i] != '0')}});
                options.AddBackendOption(option);
            }
            // Process reduce-fp32-to-fp16 option
            else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
            {
                optimizerOptions.m_ReduceFp32ToFp16 = *options_values[i] != '0';
            }
            // Process reduce-fp32-to-bf16 option
            else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-bf16"))
            {
                optimizerOptions.m_ReduceFp32ToBf16 = *options_values[i] != '0';
            }
            // Process debug-data
            else if (std::string(options_keys[i]) == std::string("debug-data"))
            {
                optimizerOptions.m_Debug = *options_values[i] != '0';
            }
            // Process memory-import
            else if (std::string(options_keys[i]) == std::string("memory-import"))
            {
                optimizerOptions.m_ImportEnabled = *options_values[i] != '0';
            }
            else
            {
                throw armnn::Exception("Unknown option for the ArmNN Delegate given: " + std::string(options_keys[i]));
            }
        }
        options.SetOptimizerOptions(optimizerOptions);
        delegate = TfLiteArmnnDelegateCreate(options);
    }
    catch (const std::exception& ex)
    {
        if (report_error)
        {
            report_error(ex.what());
        }
    }
    return delegate;
}

/** Destroy a given delegate plugin
 *
 * @param[in] delegate Delegate to destruct
 */
void tflite_plugin_destroy_delegate(TfLiteDelegate* delegate)
{
    armnnDelegate::TfLiteArmnnDelegateDelete(delegate);
}

} // extern "C"
} // namespace tflite
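
Usage sketch 1: direct call. When the delegate library is linked into the application, the two exported symbols can be called directly. The sketch below is not part of the original file; it builds the parallel key/value option arrays described in the documentation above, creates a delegate, and destroys it again. The option values ("GpuAcc,CpuAcc", "info") are examples only, and the values are kept in writable buffers because the implementation splits the backends string in place with strtok.

// Minimal sketch: assumes the ArmNN external delegate is linked into the application.
#include <cstddef>
#include <cstdio>
#include <tensorflow/lite/c/common.h>   // defines TfLiteDelegate

// Symbols exported by armnn_external_delegate.cpp
extern "C" TfLiteDelegate* tflite_plugin_create_delegate(char** options_keys,
                                                         char** options_values,
                                                         size_t num_options,
                                                         void (*report_error)(const char*));
extern "C" void tflite_plugin_destroy_delegate(TfLiteDelegate* delegate);

int main()
{
    // Writable buffers: the "backends" value is tokenized in place with strtok.
    char backendsKey[]   = "backends";
    char backendsValue[] = "GpuAcc,CpuAcc";
    char severityKey[]   = "logging-severity";
    char severityValue[] = "info";

    char* keys[]   = { backendsKey,   severityKey   };
    char* values[] = { backendsValue, severityValue };

    TfLiteDelegate* delegate = tflite_plugin_create_delegate(
        keys, values, 2,
        [](const char* msg) { std::fprintf(stderr, "ArmNN delegate error: %s\n", msg); });

    if (delegate != nullptr)
    {
        // ... hand the delegate to a TfLite interpreter, e.g. via
        // Interpreter::ModifyGraphWithDelegate(delegate), then run inference ...
        tflite_plugin_destroy_delegate(delegate);
    }
    return 0;
}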
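Usage sketch 2: dynamic loading. Because both symbols are exported with C linkage, the same library can be loaded as a plugin at runtime without linking against it; this is the path used by the TFLite external delegate provider and by the TensorFlow Lite Python bindings mentioned in the file comment. The rough sketch below shows that path on Linux with dlopen/dlsym; the library name libarmnnDelegate.so and the chosen options are assumptions for illustration.

// Rough sketch: dynamic loading of the delegate plugin (Linux; link with -ldl).
#include <cstddef>
#include <cstdio>
#include <dlfcn.h>
#include <tensorflow/lite/c/common.h>

using CreateFn  = TfLiteDelegate* (*)(char**, char**, size_t, void (*)(const char*));
using DestroyFn = void (*)(TfLiteDelegate*);

int main()
{
    // The library name is an assumption; use the path of your ArmNN delegate build.
    void* handle = dlopen("libarmnnDelegate.so", RTLD_NOW | RTLD_LOCAL);
    if (handle == nullptr)
    {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    auto create  = reinterpret_cast<CreateFn>(dlsym(handle, "tflite_plugin_create_delegate"));
    auto destroy = reinterpret_cast<DestroyFn>(dlsym(handle, "tflite_plugin_destroy_delegate"));
    if (create == nullptr || destroy == nullptr)
    {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return 1;
    }

    char key[]     = "backends";
    char value[]   = "CpuAcc,CpuRef";
    char* keys[]   = { key };
    char* values[] = { value };

    TfLiteDelegate* delegate = create(keys, values, 1, nullptr);
    if (delegate != nullptr)
    {
        // ... apply the delegate to a TfLite interpreter and run inference ...
        destroy(delegate);
    }

    dlclose(handle);
    return 0;
}

The TensorFlow Lite Python equivalent of this path is tf.lite.experimental.load_delegate, which loads the same shared library and forwards its options dictionary as the key/value strings consumed by tflite_plugin_create_delegate.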