#if defined(ARMNN_SERIALIZER)
// ...
#if defined(ARMNN_TF_LITE_PARSER)
// ...
#if defined(ARMNN_ONNX_PARSER)
// ...
#if defined(ARMNN_TFLITE_DELEGATE)
// ...
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
// ...

bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
                                 const double& thresholdTime)
{
    ARMNN_LOG(info) << "Inference time: " << std::setprecision(2)
                    << std::fixed << duration.count() << " ms\n";

    if (thresholdTime != 0.0)
    {
        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                        << std::fixed << thresholdTime << " ms";
        auto thresholdMinusInference = thresholdTime - duration.count();
        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                        << std::fixed << thresholdMinusInference << " ms" << "\n";

        if (thresholdMinusInference < 0)
        {
            std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
            // ...
        }
    }
    // ...
}
#if defined(ARMNN_TFLITE_DELEGATE)
// ...
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();
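    // The calls above follow the standard TensorFlow Lite setup sequence: load the .tflite flatbuffer
    // from params.m_ModelPath, build an Interpreter with the builtin-op resolver, and allocate the
    // tensor buffers before any input data is written.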
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(/* ... */);
    // ...
    status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
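    // ModifyGraphWithDelegate() hands the graph to the Arm NN delegate created via
    // armnnDelegate::TfLiteArmnnDelegateCreate, so that the subgraphs it supports run through Arm NN
    // rather than the reference TfLite kernels; any result other than kTfLiteOk is treated as fatal below.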
    if (status != kTfLiteOk)
    {
        ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
        // ...
    }
    // ...
    std::cout << "Running on TfLite without ArmNN delegate\n";
    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        unsigned int inputSize = 1;
        // ...
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                // ...
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 // ...
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
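        // The same pattern repeats for every supported input type below: get a typed pointer into the
        // interpreter's input tensor with typed_tensor<T>(), fill a temporary std::vector<T> via
        // PopulateTensorWithDataGeneric<T>() using a string-to-T conversion lambda, then std::copy the
        // values into the tensor's buffer.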
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0 ||
                 params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                // ...
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  // ...
                                                  [](const std::string& s)
                                                  { /* ... */ });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                // ...
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   // ...
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                // ...
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   // ...
                                                   [](const std::string& s)
                                                   { /* ... */ });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \""
                             << params.m_InputTypes[inputIndex] << "\". ";
            // ...
        }
    }
    // ...
    status = tfLiteInterpreter->Invoke();
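    // Invoke() runs the (possibly delegated) graph; its TfLiteStatus result is stored in 'status' and,
    // as with ModifyGraphWithDelegate() above, anything other than kTfLiteOk means the execution failed
    // and the outputs should not be trusted.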
    std::map<unsigned int, int> paramToTfliteOutputIndex;
    for (unsigned int paramIndex = 0; paramIndex < params.m_OutputNames.size(); ++paramIndex)
    {
        paramToTfliteOutputIndex[paramIndex] = -1;
        for (unsigned int tfLiteIndex = 0; tfLiteIndex < tfLiteInterpreter->outputs().size(); ++tfLiteIndex)
        {
            if (params.m_OutputNames[paramIndex] == tfLiteInterpreter->GetOutputName(tfLiteIndex))
            {
                paramToTfliteOutputIndex[paramIndex] = tfLiteIndex;
            }
        }
    }
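    // Each output name requested via params.m_OutputNames is matched against the interpreter's own
    // output names; an entry left at -1 means the name does not exist in the model and is reported in
    // the loop below.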
    for (unsigned int paramOutputIndex = 0; paramOutputIndex < params.m_OutputNames.size(); ++paramOutputIndex)
    {
        int outputIndex = paramToTfliteOutputIndex[paramOutputIndex];
        if (outputIndex == -1)
        {
            std::cout << fmt::format("Output name: {} doesn't exist.", params.m_OutputNames[paramOutputIndex])
                      << /* ... */
            // ...
        }
        auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
        TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

        FILE* outputTensorFile = stdout;
        // ...
        if (outputTensorFile == NULL)
        {
            ARMNN_LOG(fatal) << "Specified output tensor file, \"" /* ... */
                             << "\", cannot be created. Defaulting to stdout. "
                             << "Error was: " << std::strerror(errno);
            outputTensorFile = stdout;
        }
        // ...
        ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x + 1
                        << " to file: '" /* ... */
        // ...
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
        {
            outputSize *= outputDims->data[dim];
        }

        std::cout << tfLiteInterpreter->GetOutputName(outputIndex) << ": ";
        if (params.m_OutputTypes[paramOutputIndex].compare("float") == 0)
        {
            auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
            if (tfLiteDelageOutputData == NULL)
            {
                ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                    "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                // ...
            }
            // ...
            for (int i = 0; i < outputSize; ++i)
            {
                fprintf(outputTensorFile, "%f ", tfLiteDelageOutputData[i]);
            }
        }
        else if (params.m_OutputTypes[paramOutputIndex].compare("int") == 0)
        {
            auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
            if (tfLiteDelageOutputData == NULL)
            {
                ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                    "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                // ...
            }
            // ...
            for (int i = 0; i < outputSize; ++i)
            {
                fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
            }
        }
        else if (params.m_OutputTypes[paramOutputIndex].compare("qsymms8") == 0 ||
                 params.m_OutputTypes[paramOutputIndex].compare("qasymms8") == 0)
        {
            auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
            if (tfLiteDelageOutputData == NULL)
            {
                ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                    "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                // ...
            }
            // ...
            for (int i = 0; i < outputSize; ++i)
            {
                fprintf(outputTensorFile, "%d ", tfLiteDelageOutputData[i]);
            }
        }
        else if (params.m_OutputTypes[paramOutputIndex].compare("qasymm8") == 0 ||
                 params.m_OutputTypes[paramOutputIndex].compare("qasymmu8") == 0)
        {
            auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
            if (tfLiteDelageOutputData == NULL)
            {
                ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                    "\"" << params.m_OutputTypes[paramOutputIndex] << "\" may be incorrect.";
                // ...
            }
            // ...
            for (int i = 0; i < outputSize; ++i)
            {
                fprintf(outputTensorFile, "%u ", tfLiteDelageOutputData[i]);
            }
        }
        else
        {
            ARMNN_LOG(fatal) << "Output tensor is null, output type: " /* ... */
                                "\" may be incorrect. Output type can be specified with -z argument";
            // ...
        }
        std::cout << std::endl;
    }
    // ...
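    // Each output above is written to outputTensorFile (stdout or the per-output file selected earlier)
    // in its native element type: "%f" for float, "%d" for the signed integer types and "%u" for the
    // unsigned 8-bit types; an unrecognised output type lands in the fatal branch, which notes that the
    // type can be set with the -z argument.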
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    // ...
    std::vector<std::vector<armnnUtils::TContainer>> inputs;
    std::vector<std::vector<armnnUtils::TContainer>> outputs;
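    // armnnUtils::TContainer is a variant over std::vector<float>, std::vector<int>,
    // std::vector<unsigned char> and std::vector<int8_t>, so 'inputs' and 'outputs' hold one typed
    // buffer per tensor and one set of those buffers per iteration.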
    armnn::MakeOptional<QuantizationParams>(/* ... */)
    // ...
    ARMNN_LOG(info) << "Given network has " << numInputs << " input/s. One input-tensor-data file is required "
                    << "for each input. The user provided " /* ... */
                    << " input-tensor-data file/s which will be used to fill the input/s.\n";
    std::vector<armnnUtils::TContainer> inputDataContainers;
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        // ...
        size_t inputFileIndex = j * numInputs + i;
        // ...
        armnn::MakeOptional<std::string>(/* ... */)
        // ...
        inputDataContainers.push_back(tensorData);
    }
    inputs.push_back(inputDataContainers);
    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Float32. The "
                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                       << ". This may cause unexpected problems or random failures.";
    // ...
    if (params.m_OutputTypes[outputIdx].compare("qasymmu8") != 0)
    {
        ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type QAsymmU8. The "
                           << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                           << ". This may cause unexpected problems or random failures.";
    }
    // ...
    ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type Signed32. The "
                       << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                       << ". This may cause unexpected problems or random failures.";
    // ...
    if (params.m_OutputTypes[outputIdx].compare("qasymms8") != 0)
    {
        ARMNN_LOG(warning) << "Model output index: " << outputIdx << " has data type QAsymmS8. The "
                           << "corresponding --output-type is " << params.m_OutputTypes[outputIdx]
                           << ". This may cause unexpected problems or random failures.";
    }
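    // These warnings cross-check the data type reported by the model's output bindings (Float32,
    // QAsymmU8, Signed32, QAsymmS8) against the --output-type supplied for the same index; a mismatch
    // is not fatal but is flagged as a likely cause of garbage results.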
    std::vector<armnnUtils::TContainer> outputDataContainers;
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        // ...
        outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
        // ...
        outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
        // ...
        outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
        // ...
        outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
        // ...
    }
    outputs.push_back(outputDataContainers);
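    // Each output buffer is pre-sized with model.GetOutputSize(i) and pushed in the element type
    // selected for that output, so inference results can be written straight into it.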
    std::stringstream msg;
    msg << "Network will be executed " << params.m_Iterations;
    // ...
    msg << " times in an asynchronous manner. ";
    // ...
    msg << " times successively. ";
    // ...
    msg << "The input-tensor-data files will be reused recursively if the user didn't provide enough to "
           "cover each execution.";
    // ...
    auto inference_duration = model.Run(inputs[x], outputs[x]);
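    // Run() executes iteration x synchronously and returns the measured duration as a
    // std::chrono::duration<double, std::milli>, which feeds the per-iteration timing log below.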
    // ...
    ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
    // ...
    ARMNN_LOG(info) << "Printing outputs to console is disabled.";
    // ...
    for (size_t i = 0; i < numOutputs; i++)
    {
        // ...
        size_t outputFileIndex = x * numOutputs + i;
        // ...
        ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ... */
                        << "' of iteration: " << x + 1 << " to file: '" /* ... */
        // ...
        mapbox::util::apply_visitor(printer, outputs[x][i]);
    }

    ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                    << std::fixed << inference_duration.count() << " ms\n";
    // Per-iteration threshold bookkeeping, mirroring CheckInferenceTimeThreshold() above.
    ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2) /* ... */
    auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
    ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                    << std::fixed << thresholdMinusInference << " ms" << "\n";

    if (thresholdMinusInference < 0)
    {
        std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
        // ...
    }
    ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";
    // ...
    std::unordered_map<armnn::InferenceId, std::vector<armnnUtils::TContainer>&> inferenceOutputMap;
    // ...
    std::chrono::high_resolution_clock::time_point earliestStartTime;
    std::chrono::high_resolution_clock::time_point latestEndTime =
        std::chrono::high_resolution_clock::now();
    // ...
    std::shared_ptr<armnn::AsyncExecutionCallback> cb = callbackManager.GetNewCallback();
    inferenceOutputMap.insert({cb->GetInferenceId(), outputs[i]});
    model.RunAsync(inputs[i], outputs[i], cb);
    // ...
    for (size_t iteration = 0; iteration < params.m_Iterations; ++iteration)
    {
        // ...
        auto endTime = time_point_cast<std::chrono::milliseconds>(cb->GetEndTime());
        auto startTime = time_point_cast<std::chrono::milliseconds>(cb->GetStartTime());
        auto inferenceDuration = endTime - startTime;

        if (latestEndTime < cb->GetEndTime())
        {
            latestEndTime = cb->GetEndTime();
        }

        if (earliestStartTime.time_since_epoch().count() == 0)
        {
            earliestStartTime = cb->GetStartTime();
        }
        else if (earliestStartTime > cb->GetStartTime())
        {
            earliestStartTime = cb->GetStartTime();
        }
        // ...
        ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        // ...
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";
        // ...
        for (size_t i = 0; i < numOutputs; i++)
        {
            // ...
            size_t outputFileIndex = iteration * numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ... */
                            << "' of iteration: " << iteration + 1 << " to file: '" /* ... */
            // ...
            mapbox::util::apply_visitor(printer, inferenceOutputMap.at(cb->GetInferenceId())[i]);
        }
    }
    // ...
    auto overallEndTime = time_point_cast<std::chrono::milliseconds>(latestEndTime);
    auto overallStartTime = time_point_cast<std::chrono::milliseconds>(earliestStartTime);
    auto totalInferenceDuration = overallEndTime - overallStartTime;
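    // The overall figure for the thread-pool path is a wall-clock span from the earliest GetStartTime()
    // to the latest GetEndTime() reported by the callbacks, so overlapping inferences are not counted
    // twice the way summing the per-inference durations would.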
    ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                    << std::fixed << totalInferenceDuration.count() << " ms\n";
    ARMNN_LOG(info) << "Asynchronous Execution with std::launch::async... \n";
    std::vector<std::future<std::tuple<unsigned int,
                            std::chrono::duration<double, std::milli>>>> inferenceResults;
    // ...
    std::vector<std::unique_ptr<armnn::experimental::IWorkingMemHandle>> workingMemHandles;
    // ...
    inferenceResults.push_back(std::async(
        std::launch::async, [&model, &workingMemHandleRef, &inputs, &outputs, i]() {
            return model.RunAsync(workingMemHandleRef, inputs[i], outputs[i], i);
        }));
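    // Each submission captures its own working memory handle and returns a std::future holding the
    // (inference id, duration) tuple produced by RunAsync; the loop below blocks on get() for each
    // future in turn before printing that inference's outputs.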
    for (unsigned int j = 0; j < inferenceResults.size(); ++j)
    {
        // ...
        auto inferenceResult = inferenceResults[j].get();
        auto inferenceDuration = std::get<1>(inferenceResult);
        auto inferenceID = std::get<0>(inferenceResult);
        // ...
        ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
        // ...
        ARMNN_LOG(info) << "Printing outputs to console is disabled.";
        // ...
        for (size_t i = 0; i < numOutputs; i++)
        {
            // ...
            size_t outputFileIndex = j * numOutputs + i;
            // ...
            ARMNN_LOG(info) << "Writing output " << i << " named: '" /* ... */
                            << "' of iteration: " << j + 1 << " to file: '" /* ... */
            // ...
            mapbox::util::apply_visitor(printer, outputs[j][i]);
        }

        ARMNN_LOG(info) << "Asynchronous Execution is finished for Inference ID: " << inferenceID << " \n";
    }
    // ...
    ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                    << std::fixed << duration.count() << " ms\n";
int main(int argc, const char* argv[])
{
    // ...
    }
    catch (const std::exception& e)
    {
        // ...
    }
    // ...
    ARMNN_LOG(fatal) << "You must enable profiling if you would like to output layer details";
    // ...
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
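        // Each recognised model format instantiates MainImpl<> with the matching parser type, guarded by
        // the corresponding build-time define; the onnx and tflite branches below follow the same pattern,
        // with the tflite branch also able to route to the TfLite-delegate executor.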
#else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        // ...
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
#else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        // ...
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
#else
        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
        // ...
#endif
        // ...
#if defined(ARMNN_TF_LITE_DELEGATE)
        // ...
#else
        ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
        // ...
#endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'tflite' or 'onnx'";
        // ...
    }
}