author | Finn Williams <Finn.Williams@arm.com> | 2020-11-25 14:32:42 +0000
---|---|---
committer | finn.williams <finn.williams@arm.com> | 2020-11-30 17:00:41 +0000
commit | bbbefecd34a9420bcb003dd230402c55ee5150d5 | (patch)
tree | a2a75780106abfa81e14d6e11f568e395bcb67de | /tests/ExecuteNetwork
parent | 31c39be002b9e9040b2306e2461ee228853b4ed6 | (diff)
download | armnn-bbbefecd34a9420bcb003dd230402c55ee5150d5.tar.gz |
IVGCVSW-5587 Remove TensorFlow requirement from Arm NN TfLite delegate
* Added support for building the delegate with an external armnn path
* Replaced potentially troublesome package manager
* Explicitly set the privacy levels of delegate libraries
* Fixed some error handling in ExecuteNetwork (a sketch of the pattern follows below)
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I2a7abc099796012cbb043c5b319f81778c9f3b56
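
On the error-handling fix: tflite::Interpreter::typed_tensor<T>() is type-checked at runtime and returns a null pointer when T does not match the tensor's registered type, for example when the input type named on the ExecuteNetwork command line is wrong; that is the condition the new checks in the diff below report. A minimal sketch of the pattern, assuming the TensorFlow Lite and Arm NN logging headers are available; the helper name PopulateTypedInput is illustrative and does not appear in the patch:

    // Sketch only: PopulateTypedInput is a hypothetical helper, not the
    // patch's code. Assumes the interpreter's tensors are already allocated.
    #include <algorithm>
    #include <cstdlib>
    #include <vector>

    #include <armnn/Logging.hpp>
    #include <tensorflow/lite/interpreter.h>

    template <typename T>
    int PopulateTypedInput(tflite::Interpreter& interpreter,
                           int tensorIndex,
                           const std::vector<T>& data)
    {
        // typed_tensor<T>() returns null when T does not match the tensor's
        // actual type, so skipping this check turns a mistyped input type
        // into a crash instead of a clean failure.
        T* inputData = interpreter.typed_tensor<T>(tensorIndex);
        if (inputData == nullptr)
        {
            ARMNN_LOG(fatal) << "Input tensor is null; the requested input "
                                "type may not match the model.";
            return EXIT_FAILURE;
        }
        std::copy(data.begin(), data.end(), inputData);
        return EXIT_SUCCESS;
    }
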
Diffstat (limited to 'tests/ExecuteNetwork')
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetwork.cpp | 34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index be341b670a..00507e0c49 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -88,6 +88,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
+
+            if (inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<float> tensorData;
             PopulateTensorWithDataGeneric<float>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -100,6 +108,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if (inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int8_t> tensorData;
             PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -112,6 +128,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
+
+            if (inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<int32_t> tensorData;
             PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -124,6 +148,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
+
+            if (inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
             std::vector<uint8_t> tensorData;
             PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                    params.m_InputTensorShapes[inputIndex]->GetNumElements(),
@@ -468,7 +500,7 @@ int main(int argc, const char* argv[])
 #if defined(ARMNN_TF_LITE_DELEGATE)
         return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
 #else
-        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+        ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
         return EXIT_FAILURE;
 #endif
     }
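
A possible follow-up, not part of this change: the four hunks above add the same guard to the float, int8, int and qasymm8 branches, so the lookup and the check could be hoisted into a single small template. A sketch under that assumption; the name GetTypedInputOrNull is hypothetical:

    // Hypothetical refactor, not the committed code: one guard instead of four.
    #include <string>

    #include <armnn/Logging.hpp>
    #include <tensorflow/lite/interpreter.h>

    template <typename T>
    T* GetTypedInputOrNull(tflite::Interpreter& interpreter,
                           int tensorIndex,
                           const std::string& requestedType)
    {
        T* inputData = interpreter.typed_tensor<T>(tensorIndex);
        if (inputData == nullptr)
        {
            ARMNN_LOG(fatal) << "Input tensor is null, input type: \""
                             << requestedType << "\" may be incorrect.";
        }
        return inputData;
    }

    // Each branch in TfLiteDelegateMainImpl would then collapse to:
    //     auto* inputData = GetTypedInputOrNull<float>(*tfLiteInterpreter, input,
    //                                                  params.m_InputTypes[inputIndex]);
    //     if (inputData == nullptr) { return EXIT_FAILURE; }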