author     Matthew Sloyan <matthew.sloyan@arm.com>   2023-03-30 10:12:08 +0100
committer  ryan.oshea3 <ryan.oshea3@arm.com>         2023-04-05 20:36:32 +0000
commit     ebe392df1635790bf21714549adb97f2f75559e1 (patch)
tree       6fb8e56cc755d7c47a62bbe72c54b6ca5445377d /delegate/common
parent     ac9607f401dc30003aa97bd179a06d6b8a32139f (diff)
IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate
* Updated all tests to use the new DelegateTestInterpreter.
* Fixed some unit tests where the shape was incorrect.
* Added a file identifier to FlatBuffersBuilder, as it is required for
  validation when creating the model using the new API.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
Diffstat (limited to 'delegate/common')
-rw-r--r--  delegate/common/src/test/DelegateTestInterpreter.hpp       175
-rw-r--r--  delegate/common/src/test/DelegateTestInterpreterUtils.hpp  110
2 files changed, 285 insertions, 0 deletions
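For orientation, a typical unit test drives the new DelegateTestInterpreter roughly as follows. This is a minimal sketch, not part of the patch: the model buffer, backend choice, and tensor values are placeholders, and test-framework error handling is elided.

    #include <DelegateTestInterpreter.hpp>

    void RunExampleTest(std::vector<char>& modelBuffer)
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<float> inputValues = { 1.0f, 2.0f, 3.0f, 4.0f };

        // Build an interpreter with the Arm NN delegate applied.
        delegateTestInterpreter::DelegateTestInterpreter interpreter(modelBuffer, backends);

        // AllocateTensors() must run before inputs are filled or Invoke() is called.
        interpreter.AllocateTensors();
        interpreter.FillInputTensor(inputValues, 0);
        interpreter.Invoke();

        std::vector<float> outputValues = interpreter.GetOutputResult<float>(0);
        std::vector<int32_t> outputShape = interpreter.GetOutputShape(0);

        // Release the TfLiteInterpreter and the delegate.
        interpreter.Cleanup();
    }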
diff --git a/delegate/common/src/test/DelegateTestInterpreter.hpp b/delegate/common/src/test/DelegateTestInterpreter.hpp
new file mode 100644
index 0000000000..0b63441ddd
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreter.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateTestInterpreterUtils.hpp>
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/c/c_api_internal.h>
+
+namespace delegateTestInterpreter
+{
+
+class DelegateTestInterpreter
+{
+public:
+ /// Create TfLite Interpreter only
+ DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
+ {
+ TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+ TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+ if (!customOp.empty())
+ {
+ options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+ }
+
+ m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
+ m_TfLiteDelegate = nullptr;
+
+ // The options and model can be deleted after the interpreter is created.
+ TfLiteInterpreterOptionsDelete(options);
+ TfLiteModelDelete(model);
+ }
+
+ /// Create Interpreter with default Arm NN Classic/Opaque Delegate applied
+ DelegateTestInterpreter(std::vector<char>& model,
+ const std::vector<armnn::BackendId>& backends,
+ const std::string& customOp = "",
+ bool disableFallback = true);
+
+ /// Create Interpreter with Arm NN Classic/Opaque Delegate applied and DelegateOptions
+ DelegateTestInterpreter(std::vector<char>& model,
+ const armnnDelegate::DelegateOptions& delegateOptions,
+ const std::string& customOp = "");
+
+ /// Allocate the TfLiteTensors within the graph.
+ /// This must be called before FillInputTensor(values, index) and Invoke().
+ TfLiteStatus AllocateTensors()
+ {
+ return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
+ }
+
+ /// Copy a buffer of values into an input tensor at a given index.
+ template<typename T>
+ TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
+ {
+ TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
+ }
+
+    /// Copy a buffer of boolean values into an input tensor at a given index.
+    /// std::vector<bool> packs its elements into bits and provides no data()
+    /// member returning bool*, so the tensor data has to be written directly.
+ TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
+ {
+ TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ if(inputTensor->type != kTfLiteBool)
+ {
+ throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
+ }
+
+        // Make sure enough bytes are allocated to copy into.
+ if(inputTensor->bytes < inputValues.size() * sizeof(bool))
+ {
+ throw armnn::Exception("Input tensor has not been allocated to match number of input values.");
+ }
+
+ for (unsigned int i = 0; i < inputValues.size(); ++i)
+ {
+ inputTensor->data.b[i] = inputValues[i];
+ }
+
+ return kTfLiteOk;
+ }
+
+ /// Run the interpreter either on TFLite Runtime or Arm NN Delegate.
+ /// AllocateTensors() must be called before Invoke().
+ TfLiteStatus Invoke()
+ {
+ return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
+ }
+
+ /// Return a buffer of values from the output tensor at a given index.
+ /// This must be called after Invoke().
+ template<typename T>
+ std::vector<T> GetOutputResult(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+
+ int64_t n = tflite::NumElements(outputTensor);
+ std::vector<T> output;
+ output.resize(n);
+
+ TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
+ if(status != kTfLiteOk)
+ {
+ throw armnn::Exception("An error occurred when copying output buffer.");
+ }
+
+ return output;
+ }
+
+    /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
+    /// std::vector<bool> packs its elements into bits and provides no data()
+    /// member returning bool*, so the tensor data has to be read directly.
+ std::vector<bool> GetOutputResult(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ if(outputTensor->type != kTfLiteBool)
+ {
+ throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
+ }
+
+ int64_t n = tflite::NumElements(outputTensor);
+        std::vector<bool> output(n, false);
+
+ for (unsigned int i = 0; i < output.size(); ++i)
+ {
+ output[i] = outputTensor->data.b[i];
+ }
+ return output;
+ }
+
+ /// Return a buffer of dimensions from the output tensor at a given index.
+ std::vector<int32_t> GetOutputShape(int index)
+ {
+ const TfLiteTensor* outputTensor =
+ delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+ int32_t numDims = TfLiteTensorNumDims(outputTensor);
+
+ std::vector<int32_t> dims;
+ dims.reserve(numDims);
+
+ for (int32_t i = 0; i < numDims; ++i)
+ {
+ dims.push_back(TfLiteTensorDim(outputTensor, i));
+ }
+ return dims;
+ }
+
+ /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
+ void Cleanup();
+
+private:
+ TfLiteInterpreter* m_TfLiteInterpreter;
+
+ /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
+ void* m_TfLiteDelegate;
+};
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
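The std::vector<bool> overloads above exist because the standard library specializes vector<bool> as a packed bit container. A standalone sketch of the constraint (illustration only, not from the patch):

    #include <cstdint>
    #include <vector>

    int main()
    {
        std::vector<uint8_t> bytes = { 1, 0, 1 };
        std::vector<bool>    bits  = { true, false, true };

        // Fine: vector<uint8_t> is contiguous, so data() yields a raw
        // pointer suitable for TfLiteTensorCopyFromBuffer-style copies.
        uint8_t* raw = bytes.data();

        // Not possible: the vector<bool> specialization packs elements
        // into bits and provides no data() member returning bool*.
        // bool* rawBits = bits.data();   // does not compile

        // Hence FillInputTensor and GetOutputResult copy bool elements
        // one at a time through the tensor's data.b array.
        (void)raw;
        return 0;
    }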
diff --git a/delegate/common/src/test/DelegateTestInterpreterUtils.hpp b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
new file mode 100644
index 0000000000..396c75c22e
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <type_traits>
+
+namespace delegateTestInterpreter
+{
+
+inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+ TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
+ if(inputTensor == nullptr)
+ {
+ throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
+ }
+ return inputTensor;
+}
+
+inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+ const TfLiteTensor* outputTensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
+ if(outputTensor == nullptr)
+ {
+ throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
+ }
+ return outputTensor;
+}
+
+inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
+{
+ TfLiteModel* tfLiteModel = TfLiteModelCreate(data.data(), data.size());
+ if(tfLiteModel == nullptr)
+ {
+ throw armnn::Exception("An error has occurred when creating the TfLiteModel.");
+ }
+ return tfLiteModel;
+}
+
+inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
+{
+ TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+ if(options == nullptr)
+ {
+ throw armnn::Exception("An error has occurred when creating the TfLiteInterpreterOptions.");
+ }
+ return options;
+}
+
+inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
+{
+ tflite::ops::builtin::BuiltinOpResolver opResolver;
+ if (opName == "MaxPool3D")
+ {
+ opResolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+ }
+ else if (opName == "AveragePool3D")
+ {
+ opResolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+ }
+ else
+ {
+ throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
+ }
+ return opResolver;
+}
+
+template<typename T>
+inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
+{
+    // Make sure enough bytes are allocated to copy into for the uint8_t and int16_t cases.
+ if(tensor->bytes < values.size() * sizeof(T))
+ {
+ throw armnn::Exception("Tensor has not been allocated to match number of values.");
+ }
+
+    // A uint8_t/int16_t specific path is required, as TFLite tensors of these types can be allocated with
+    // more bytes than the values passed in. Otherwise, the generic TfLiteTensorCopyFromBuffer function is used.
+ TfLiteStatus status = kTfLiteOk;
+ if (std::is_same<T, uint8_t>::value)
+ {
+ for (unsigned int i = 0; i < values.size(); ++i)
+ {
+ tensor->data.uint8[i] = values[i];
+ }
+ }
+ else if (std::is_same<T, int16_t>::value)
+ {
+ for (unsigned int i = 0; i < values.size(); ++i)
+ {
+ tensor->data.i16[i] = values[i];
+ }
+ }
+ else
+ {
+ status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
+ }
+ return status;
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
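As a usage sketch for the helpers above (the interpreter creation and tensor allocation are assumed to have happened elsewhere; the wrapper function FillUInt8Input is hypothetical, not part of the patch):

    #include <DelegateTestInterpreterUtils.hpp>

    #include <vector>

    void FillUInt8Input(TfLiteInterpreter* interpreter, std::vector<uint8_t>& values)
    {
        using namespace delegateTestInterpreter;

        // Throws armnn::Exception if there is no input tensor at index 0.
        TfLiteTensor* tensor = GetInputTensorFromInterpreter(interpreter, 0);

        // For uint8_t this takes the element-wise path, since the tensor may
        // hold more bytes than values.size() and the generic
        // TfLiteTensorCopyFromBuffer requires an exact byte-size match.
        if (CopyFromBufferToTensor(tensor, values) != kTfLiteOk)
        {
            throw armnn::Exception("Failed to copy values into the input tensor.");
        }
    }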