1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
|
//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Exceptions.hpp>

#include <tensorflow/lite/core/c/c_api.h>
#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/kernels/register.h>

#include <string>
#include <type_traits>
#include <vector>
namespace delegateTestInterpreter
{
/// Fetches the interpreter's input tensor at @p index.
/// @throws armnn::Exception if TFLite reports no tensor at that index.
inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
{
    auto* tensor = TfLiteInterpreterGetInputTensor(interpreter, index);
    if (!tensor)
    {
        throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
    }
    return tensor;
}
/// Fetches the interpreter's output tensor at @p index.
/// @throws armnn::Exception if TFLite reports no tensor at that index.
inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
{
    const auto* tensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
    if (!tensor)
    {
        throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
    }
    return tensor;
}
/// Builds a TfLiteModel from an in-memory flatbuffer.
/// @param data Raw model bytes; must outlive the returned model (TFLite does not copy).
/// @throws armnn::Exception if model creation fails.
inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
{
    auto* model = TfLiteModelCreate(data.data(), data.size());
    if (!model)
    {
        throw armnn::Exception("An error has occurred when creating the TfLiteModel.");
    }
    return model;
}
/// Creates a fresh TfLiteInterpreterOptions instance.
/// @throws armnn::Exception if the options object could not be created.
inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
{
    auto* interpreterOptions = TfLiteInterpreterOptionsCreate();
    if (!interpreterOptions)
    {
        throw armnn::Exception("An error has occurred when creating the TfLiteInterpreterOptions.");
    }
    return interpreterOptions;
}
/// Returns a builtin op resolver with the named custom op registered.
/// Supported names: "MaxPool3D", "AveragePool3D".
/// @throws armnn::Exception for any other op name.
inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
{
    tflite::ops::builtin::BuiltinOpResolver resolver;
    if (opName == "MaxPool3D")
    {
        resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
        return resolver;
    }
    if (opName == "AveragePool3D")
    {
        resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
        return resolver;
    }
    throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
}
/// Copies host-side values into a TFLite tensor's backing buffer.
/// @param tensor Destination tensor; must already be allocated.
/// @param values Source elements to copy.
/// @return kTfLiteOk, or the status from TfLiteTensorCopyFromBuffer for the generic path.
/// @throws armnn::Exception if the tensor's allocation is smaller than the payload.
template<typename T>
inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
{
    // Make sure there is enough bytes allocated to copy into for uint8_t and int16_t case.
    if (tensor->bytes < values.size() * sizeof(T))
    {
        throw armnn::Exception("Tensor has not been allocated to match number of values.");
    }

    // Requires uint8_t and int16_t specific case as the number of bytes is larger than values passed when creating
    // TFLite tensors of these types. Otherwise, use generic TfLiteTensorCopyFromBuffer function.
    // 'if constexpr' instantiates only the branch matching T, so the data-union
    // element-wise assignments are never compiled (with narrowing warnings) for other types.
    TfLiteStatus status = kTfLiteOk;
    if constexpr (std::is_same_v<T, uint8_t>)
    {
        for (std::size_t i = 0; i < values.size(); ++i)
        {
            tensor->data.uint8[i] = values[i];
        }
    }
    else if constexpr (std::is_same_v<T, int16_t>)
    {
        for (std::size_t i = 0; i < values.size(); ++i)
        {
            tensor->data.i16[i] = values[i];
        }
    }
    else
    {
        status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
    }
    return status;
}
} // namespace delegateTestInterpreter
|