From bbbefecd34a9420bcb003dd230402c55ee5150d5 Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Wed, 25 Nov 2020 14:32:42 +0000
Subject: IVGCVSW-5587 Remove Tensorflow requirement from Arm NN TfLite delegate

 * Added support for building the delegate with an external armnn path
 * Replaced potentially troublesome package manager
 * Explicitly set the privacy levels of delegate libraries
 * Fixed some error handling in ExecuteNetwork

Signed-off-by: Finn Williams
Change-Id: I2a7abc099796012cbb043c5b319f81778c9f3b56
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 34 ++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

(limited to 'tests')

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index be341b670a..00507e0c49 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -88,6 +88,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+
+            if(tfLiteInterpreter == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<float> tensorData;
             PopulateTensorWithDataGeneric<float>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -100,6 +108,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(tfLiteInterpreter == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int8_t> tensorData;
             PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -112,6 +128,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+
+            if(tfLiteInterpreter == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int32_t> tensorData;
             PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -124,6 +148,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+
+            if(tfLiteInterpreter == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<uint8_t> tensorData;
             PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -468,7 +500,7 @@ int main(int argc, const char* argv[])
 #if defined(ARMNN_TF_LITE_DELEGATE)
         return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
 #else
-        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+        ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
         return EXIT_FAILURE;
 #endif
     }
--
cgit v1.2.1
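
For context, the guard added by the hunks above is meant to catch the case where the interpreter cannot hand back a typed buffer for the requested input type. Below is a minimal, standalone sketch of that pattern against the stock TensorFlow Lite C++ Interpreter API; it is not code from this patch: the function name PopulateFloatInput and the use of std::cerr in place of ARMNN_LOG are illustrative only, and this sketch checks the pointer returned by typed_tensor<float>() directly rather than the interpreter object.

// Minimal sketch (not from the patch): guard the typed input buffer before
// writing to it. typed_tensor<T>() returns nullptr when the tensor index is
// invalid or the tensor's element type does not match T.
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <vector>

#include "tensorflow/lite/interpreter.h"

int PopulateFloatInput(tflite::Interpreter& interpreter, int tensorId, std::size_t numElements)
{
    float* inputData = interpreter.typed_tensor<float>(tensorId);
    if (inputData == nullptr)
    {
        // Mirrors the intent of the patch's fatal log: the requested input
        // type may not match what the model actually expects.
        std::cerr << "Input tensor is null, the requested input type may be incorrect.\n";
        return EXIT_FAILURE;
    }

    // Placeholder data; ExecuteNetwork fills this from a data file instead.
    std::vector<float> tensorData(numElements, 0.0f);
    std::copy(tensorData.begin(), tensorData.end(), inputData);
    return EXIT_SUCCESS;
}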