//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateTestInterpreterUtils.hpp>

#include <armnn_delegate.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Exceptions.hpp>

#include <tensorflow/lite/core/c/c_api.h>
#include <tensorflow/lite/kernels/kernel_util.h>
#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/c/c_api_internal.h>

namespace delegateTestInterpreter
{

class DelegateTestInterpreter
{
public:
    /// Create TfLite Interpreter only
    DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
    {
        TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);

        TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
        if (!customOp.empty())
        {
            options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
        }

        m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
        m_TfLiteDelegate = nullptr;

        // The options and model can be deleted after the interpreter is created.
        TfLiteInterpreterOptionsDelete(options);
        TfLiteModelDelete(model);
    }

    /// Create Interpreter with default Arm NN Classic/Opaque Delegate applied
    DelegateTestInterpreter(std::vector<char>& model,
                            const std::vector<armnn::BackendId>& backends,
                            const std::string& customOp = "",
                            bool disableFallback = true);

    /// Create Interpreter with Arm NN Classic/Opaque Delegate applied and DelegateOptions
    DelegateTestInterpreter(std::vector<char>& model,
                            const armnnDelegate::DelegateOptions& delegateOptions,
                            const std::string& customOp = "");

    /// Allocate the TfLiteTensors within the graph.
    /// This must be called before FillInputTensor(values, index) and Invoke().
    TfLiteStatus AllocateTensors()
    {
        return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
    }

    /// Copy a buffer of values into an input tensor at a given index.
    template <typename T>
    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
    {
        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter,
                                                                                           index);
        return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
    }

    /// Copy a boolean buffer of values into an input tensor at a given index.
    /// Boolean types get converted to a bit representation in a vector.
    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
    TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
    {
        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter,
                                                                                           index);
        if (inputTensor->type != kTfLiteBool)
        {
            throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
        }

        // Make sure there are enough bytes allocated to copy into.
        if (inputTensor->bytes < inputValues.size() * sizeof(bool))
        {
            throw armnn::Exception("Input tensor has not been allocated to match number of input values.");
        }

        for (unsigned int i = 0; i < inputValues.size(); ++i)
        {
            inputTensor->data.b[i] = inputValues[i];
        }

        return kTfLiteOk;
    }

    /// Run the interpreter either on TFLite Runtime or Arm NN Delegate.
    /// AllocateTensors() must be called before Invoke().
    TfLiteStatus Invoke()
    {
        return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
    }
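    // Typical call sequence, as a minimal sketch. The model bytes, the float
    // element type, and the tensor indices below are assumptions made for
    // illustration; they are not guaranteed by this header.
    //
    //     std::vector<char> modelBuffer = /* flatbuffer bytes of a TfLite model */;
    //     DelegateTestInterpreter interpreter(modelBuffer);
    //
    //     interpreter.AllocateTensors();                    // must precede filling and invoking
    //     std::vector<float> inputValues { 1.0f, 2.0f };    // assumed float input at index 0
    //     interpreter.FillInputTensor<float>(inputValues, 0);
    //     interpreter.Invoke();
    //     std::vector<float> outputValues = interpreter.GetOutputResult<float>(0);
    //     interpreter.Cleanup();                            // releases interpreter and delegate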
    /// Return a buffer of values from the output tensor at a given index.
    /// This must be called after Invoke().
    template <typename T>
    std::vector<T> GetOutputResult(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);

        int64_t n = tflite::NumElements(outputTensor);

        std::vector<T> output;
        output.resize(n);

        TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
        if (status != kTfLiteOk)
        {
            throw armnn::Exception("An error occurred when copying output buffer.");
        }

        return output;
    }

    /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
    /// Boolean types get converted to a bit representation in a vector.
    /// vector.data() returns a void pointer instead of a pointer to bool, so the tensor needs to be accessed directly.
    std::vector<bool> GetOutputResult(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
        if (outputTensor->type != kTfLiteBool)
        {
            throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
        }

        int64_t n = tflite::NumElements(outputTensor);

        std::vector<bool> output(n, false);

        for (unsigned int i = 0; i < output.size(); ++i)
        {
            output[i] = outputTensor->data.b[i];
        }
        return output;
    }

    /// Return a buffer of dimensions from the output tensor at a given index.
    std::vector<int32_t> GetOutputShape(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
        int32_t numDims = TfLiteTensorNumDims(outputTensor);

        std::vector<int32_t> dims;
        dims.reserve(numDims);

        for (int32_t i = 0; i < numDims; ++i)
        {
            dims.push_back(TfLiteTensorDim(outputTensor, i));
        }
        return dims;
    }

    /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
    void Cleanup();

private:
    TfLiteInterpreter* m_TfLiteInterpreter;

    /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
    void* m_TfLiteDelegate;
};

} // namespace delegateTestInterpreter
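
// A second minimal sketch, this time applying the Arm NN delegate via the
// backends constructor. Runtime availability of the CpuRef backend is an
// assumption of the example, not something this header guarantees:
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     DelegateTestInterpreter armnnInterpreter(modelBuffer, backends);
//     armnnInterpreter.AllocateTensors();
//     ...same fill/invoke/read sequence as the sketch above...
//     armnnInterpreter.Cleanup();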