From ebe392df1635790bf21714549adb97f2f75559e1 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 30 Mar 2023 10:12:08 +0100
Subject: IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate

 * Updated all tests to use the new DelegateTestInterpreter.
 * Fixed some unit tests where the shape was incorrect.
 * Added a file identifier to FlatBuffersBuilder, as it is required for
   validation when creating the model using the new API.

Signed-off-by: Matthew Sloyan
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
---
 .../common/src/test/DelegateTestInterpreter.hpp    | 175 +++++++++++++++++++++
 .../src/test/DelegateTestInterpreterUtils.hpp      | 110 +++++++++++++
 2 files changed, 285 insertions(+)
 create mode 100644 delegate/common/src/test/DelegateTestInterpreter.hpp
 create mode 100644 delegate/common/src/test/DelegateTestInterpreterUtils.hpp

diff --git a/delegate/common/src/test/DelegateTestInterpreter.hpp b/delegate/common/src/test/DelegateTestInterpreter.hpp
new file mode 100644
index 0000000000..0b63441ddd
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreter.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateTestInterpreterUtils.hpp>
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/c/c_api_internal.h>
+
+namespace delegateTestInterpreter
+{
+
+class DelegateTestInterpreter
+{
+public:
+    /// Create TfLite Interpreter only
+    DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
+    {
+        TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+        TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+        if (!customOp.empty())
+        {
+            options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+        }
+
+        m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
+        m_TfLiteDelegate = nullptr;
+
+        // The options and model can be deleted after the interpreter is created.
+        TfLiteInterpreterOptionsDelete(options);
+        TfLiteModelDelete(model);
+    }
+
+    /// Create Interpreter with the default Arm NN Classic/Opaque Delegate applied
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const std::vector<armnn::BackendId>& backends,
+                            const std::string& customOp = "",
+                            bool disableFallback = true);
+
+    /// Create Interpreter with the Arm NN Classic/Opaque Delegate applied and DelegateOptions
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const armnnDelegate::DelegateOptions& delegateOptions,
+                            const std::string& customOp = "");
+
+    /// Allocate the TfLiteTensors within the graph.
+    /// This must be called before FillInputTensor(values, index) and Invoke().
+    TfLiteStatus AllocateTensors()
+    {
+        return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
+    }
+
+    /// Copy a buffer of values into an input tensor at a given index.
+    template<typename T>
+    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
+    }
+
+    /// Copy a buffer of boolean values into an input tensor at a given index.
+    /// Boolean types get converted to a bit representation in a vector;
+    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+    TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(inputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        // Make sure there are enough bytes allocated to copy into.
+        if(inputTensor->bytes < inputValues.size() * sizeof(bool))
+        {
+            throw armnn::Exception("Input tensor has not been allocated to match the number of input values.");
+        }
+
+        for (unsigned int i = 0; i < inputValues.size(); ++i)
+        {
+            inputTensor->data.b[i] = inputValues[i];
+        }
+
+        return kTfLiteOk;
+    }
+
+    /// Run the interpreter either on the TfLite Runtime or the Arm NN Delegate.
+    /// AllocateTensors() must be called before Invoke().
+    TfLiteStatus Invoke()
+    {
+        return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
+    }
+
+    /// Return a buffer of values from the output tensor at a given index.
+    /// This must be called after Invoke().
+    template<typename T>
+    std::vector<T> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<T> output;
+        output.resize(n);
+
+        TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
+        if(status != kTfLiteOk)
+        {
+            throw armnn::Exception("An error occurred when copying the output buffer.");
+        }
+
+        return output;
+    }
+
+    /// Return a buffer of boolean values from the output tensor at a given index. This must be called after Invoke().
+    /// Boolean types get converted to a bit representation in a vector;
+    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
+    std::vector<bool> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(outputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<bool> output(n, false);
+        output.reserve(n);
+
+        for (unsigned int i = 0; i < output.size(); ++i)
+        {
+            output[i] = outputTensor->data.b[i];
+        }
+        return output;
+    }
+
+    /// Return a buffer of dimensions from the output tensor at a given index.
+    std::vector<int32_t> GetOutputShape(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        int32_t numDims = TfLiteTensorNumDims(outputTensor);
+
+        std::vector<int32_t> dims;
+        dims.reserve(numDims);
+
+        for (int32_t i = 0; i < numDims; ++i)
+        {
+            dims.push_back(TfLiteTensorDim(outputTensor, i));
+        }
+        return dims;
+    }
+
+    /// Delete the TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate.
+    void Cleanup();
+
+private:
+    TfLiteInterpreter* m_TfLiteInterpreter;
+
+    /// m_TfLiteDelegate can be a TfLiteDelegate or a TfLiteOpaqueDelegate.
+    void* m_TfLiteDelegate;
+};
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreterUtils.hpp b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
new file mode 100644
index 0000000000..396c75c22e
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <type_traits>
+
+namespace delegateTestInterpreter
+{
+
+inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
+    if(inputTensor == nullptr)
+    {
+        throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
+    }
+    return inputTensor;
+}
+
+inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    const TfLiteTensor* outputTensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
+    if(outputTensor == nullptr)
+    {
+        throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
+    }
+    return outputTensor;
+}
+
+inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
+{
+    TfLiteModel* tfLiteModel = TfLiteModelCreate(data.data(), data.size());
+    if(tfLiteModel == nullptr)
+    {
+        throw armnn::Exception("An error occurred when creating the TfLiteModel.");
+    }
+    return tfLiteModel;
+}
+
+inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
+{
+    TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+    if(options == nullptr)
+    {
+        throw armnn::Exception("An error occurred when creating the TfLiteInterpreterOptions.");
+    }
+    return options;
+}
+
+inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
+{
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    if (opName == "MaxPool3D")
+    {
+        opResolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+    }
+    else if (opName == "AveragePool3D")
+    {
+        opResolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+    }
+    else
+    {
+        throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
+    }
+    return opResolver;
+}
+
+template<typename T>
+inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
+{
+    // Make sure there are enough bytes allocated to copy into, in particular for the uint8_t and int16_t cases.
+    if(tensor->bytes < values.size() * sizeof(T))
+    {
+        throw armnn::Exception("Tensor has not been allocated to match the number of values.");
+    }
+
+    // uint8_t and int16_t need a specific case, as the number of bytes allocated can be larger than the size of
+    // the values passed in when TFLite tensors of these types are created.
+    // Otherwise, use the generic TfLiteTensorCopyFromBuffer function.
+    TfLiteStatus status = kTfLiteOk;
+    if (std::is_same<T, uint8_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.uint8[i] = values[i];
+        }
+    }
+    else if (std::is_same<T, int16_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.i16[i] = values[i];
+        }
+    }
+    else
+    {
+        status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
+    }
+    return status;
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
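A minimal usage sketch of the new interpreter, for reference. This is not part of
the patch: BuildTestModel() is a hypothetical stand-in for the per-test model
builders, and CHECK is assumed to be the doctest macro used elsewhere in the
delegate tests.

    // Build a TFLite flatbuffer model and choose the backends to run it on.
    std::vector<char> modelBuffer = BuildTestModel();   // hypothetical helper
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    // Create the interpreter with the Arm NN delegate applied, then allocate tensors.
    delegateTestInterpreter::DelegateTestInterpreter interpreter(modelBuffer, backends);
    CHECK(interpreter.AllocateTensors() == kTfLiteOk);

    // Fill the first input tensor, run the model, and read back the results.
    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
    CHECK(interpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(interpreter.Invoke() == kTfLiteOk);

    std::vector<float>   outputValues = interpreter.GetOutputResult<float>(0);
    std::vector<int32_t> outputShape  = interpreter.GetOutputShape(0);

    // Cleanup() frees the TfLiteInterpreter and the delegate.
    interpreter.Cleanup();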