//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include "TestUtils.hpp"
#include <armnn_delegate.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>
#include <doctest/doctest.h>

#include <array>
#include <memory>
#include <vector>
namespace
{
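
// Builds a FlatBuffer model containing a single elementwise unary operator
// (e.g. ABS, EXP, NEG) with one input tensor and one output tensor of the
// given type and shape, and returns the serialised model as a byte vector.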
std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unaryOperatorCode,
                                                    tflite::TensorType tensorType,
                                                    const std::vector<int32_t>& tensorShape)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));

    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);

    // Create operator
    const std::vector<int> operatorInputs{0};
    const std::vector<int> operatorOutputs{1};
    flatbuffers::Offset<Operator> unaryOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));

    const std::vector<int> subgraphInputs{0};
    const std::vector<int> subgraphOutputs{1};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unaryOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Unary Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unaryOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
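
// Runs the given unary operator on the supplied FP32 input through two TfLite
// interpreters, one accelerated by the Arm NN delegate on the given backends
// and one using the reference TfLite kernels, then checks that both produce
// the expected output values.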
void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
                              std::vector<armnn::BackendId>& backends,
                              std::vector<float>& inputValues,
                              std::vector<float>& expectedOutputValues)
{
    using namespace tflite;
    std::vector<int32_t> inputShape { 3, 1, 2 };
    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
                                                                      ::tflite::TensorType_FLOAT32,
                                                                      inputShape);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the Arm NN delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use the Arm NN delegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set the same input data on both interpreters
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }

    // Run inference on both interpreters
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
}
} // anonymous namespace
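
// Example usage (a minimal sketch; in the Arm NN delegate test suite such
// cases live in per-operator test files, and the test name and backend list
// shown here are placeholders):
//
// TEST_CASE("Abs_CpuRef_Test")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<float> inputValues          { -0.1f, -0.2f, -0.3f, 0.1f, 0.2f, 0.3f };
//     std::vector<float> expectedOutputValues {  0.1f,  0.2f,  0.3f, 0.1f, 0.2f, 0.3f };
//     ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
// }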