diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-08-04 17:58:09 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-05 16:33:19 +0100 |
commit | aa2123093a9b22c268ca98b105611bb344e0ad51 (patch) | |
tree | cd3581cfd84c4f808d24697d9da1fe5647063df2 /tests/ExecuteNetwork/TfliteExecutor.cpp | |
parent | cb85bb77e756078ac138ffb4ecdc6f18a2a2d859 (diff) | |
download | armnn-aa2123093a9b22c268ca98b105611bb344e0ad51.tar.gz |
IVGCVSW-7157 ExNet. interpreter chooses a different input type.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: If00d8dab2846c484a1969fb152cb9f8bd16e1b3e
Diffstat (limited to 'tests/ExecuteNetwork/TfliteExecutor.cpp')
-rw-r--r-- | tests/ExecuteNetwork/TfliteExecutor.cpp | 26 |
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp index 98b6c9dad1..59c69f9d6a 100644 --- a/tests/ExecuteNetwork/TfliteExecutor.cpp +++ b/tests/ExecuteNetwork/TfliteExecutor.cpp @@ -30,7 +30,7 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa armnnDelegate::TfLiteArmnnDelegateDelete); // Register armnn_delegate to TfLiteInterpreter status = m_TfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate)); - if (status == kTfLiteError) + if (status != kTfLiteOk) { LogAndThrow("Could not register ArmNN TfLite Delegate to TfLiteInterpreter"); } @@ -40,14 +40,14 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa std::cout << "Running on TfLite without ArmNN delegate\n"; } - armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData - ? armnn::EmptyOptional() - : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[0]); - const size_t numInputs = m_Params.m_InputNames.size(); for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex) { + armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData + ? 
armnn::EmptyOptional() + : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[inputIndex]); + int input = m_TfLiteInterpreter->inputs()[inputIndex]; TfLiteIntArray* inputDims = m_TfLiteInterpreter->tensor(input)->dims; @@ -58,39 +58,39 @@ TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(pa inputSize *= inputDims->data[dim]; } - const auto& inputName = m_TfLiteInterpreter->input_tensor(input)->name; - const auto& dataType = m_TfLiteInterpreter->input_tensor(input)->type; + const auto& inputName = m_TfLiteInterpreter->tensor(input)->name; + const auto& dataType = m_TfLiteInterpreter->tensor(input)->type; switch (dataType) { case kTfLiteFloat32: { auto inputData = m_TfLiteInterpreter->typed_tensor<float>(input); - PopulateTensorWithData(inputData, inputSize, dataFile, inputName); + PopulateTensorWithData<float>(inputData, inputSize, dataFile, inputName); break; } case kTfLiteInt32: { - auto inputData = m_TfLiteInterpreter->typed_tensor<int>(input); - PopulateTensorWithData(inputData, inputSize, dataFile, inputName); + auto inputData = m_TfLiteInterpreter->typed_tensor<int32_t>(input); + PopulateTensorWithData<int32_t>(inputData, inputSize, dataFile, inputName); break; } case kTfLiteUInt8: { auto inputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(input); - PopulateTensorWithData(inputData, inputSize, dataFile, inputName); + PopulateTensorWithData<uint8_t>(inputData, inputSize, dataFile, inputName); break; } case kTfLiteInt16: { auto inputData = m_TfLiteInterpreter->typed_tensor<int16_t>(input); - PopulateTensorWithData(inputData, inputSize, dataFile, inputName); + PopulateTensorWithData<int16_t>(inputData, inputSize, dataFile, inputName); break; } case kTfLiteInt8: { auto inputData = m_TfLiteInterpreter->typed_tensor<int8_t>(input); - PopulateTensorWithData(inputData, inputSize, dataFile, inputName); + PopulateTensorWithData<int8_t>(inputData, inputSize, dataFile, inputName); break; } default: |