ArmNN 20.11
QuantizedLstmEndToEndTestImpl.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "QuantizedLstmEndToEndTestImpl.hpp"

#include "CommonTestUtils.hpp"
#include "EndToEndTestImpl.hpp"

#include <ResolveType.hpp>

#include <armnn/INetwork.hpp>
#include <armnn/QuantizedLstmParams.hpp>

#include <armnn/utility/NumericCast.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/test/unit_test.hpp>

#include <type_traits>

namespace
{

using MultiArray = const boost::multi_array<uint8_t, 2>&;

armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
                                              MultiArray expectedOutput)
{
    auto batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
    auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
    auto outputSize = armnn::numeric_cast<unsigned int>(expectedOutput.shape()[1]);

    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

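    // The scales are related: inputOutputScale is 1/128, cellStateScale is
    // 2^-11, and biasScale is exactly inputOutputScale * weightsScale
    // (0.0078125f * 0.00408021f), the usual rule for quantizing LSTM biases.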
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QAsymmU8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QAsymmU8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    armnn::QuantizedLstmInputParams data;

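    // Constant tensors for the twelve LSTM parameters: four input-to-gate
    // weight matrices, four recurrent weight matrices and four gate biases.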
    const std::vector<uint8_t> inputToInputWeightsVector = {146, 250, 235, 171, 10, 218, 171, 108};
    armnn::ConstTensor inputToInputWeightsTensor(inputWeightsInfo, inputToInputWeightsVector.data());

    const std::vector<uint8_t> inputToForgetWeightsVector = {24, 50, 132, 179, 158, 110, 3, 169};
    armnn::ConstTensor inputToForgetWeightsTensor(inputWeightsInfo, inputToForgetWeightsVector.data());

    const std::vector<uint8_t> inputToCellWeightsTensorVector = {133, 34, 29, 49, 206, 109, 54, 183};
    armnn::ConstTensor inputToCellWeightsTensor(inputWeightsInfo, inputToCellWeightsTensorVector.data());

    const std::vector<uint8_t> inputToOutputWeightsTensorVector = {195, 187, 11, 99, 109, 10, 218, 48};
    armnn::ConstTensor inputToOutputWeightsTensor(inputWeightsInfo, inputToOutputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToInputWeightsTensorVector =
        {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
    armnn::ConstTensor recurrentToInputWeightsTensor(recurrentWeightsInfo, recurrentToInputWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToForgetWeightsTensorVector =
        {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
    armnn::ConstTensor recurrentToForgetWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToForgetWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToCellWeightsTensorVector =
        {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
    armnn::ConstTensor recurrentToCellWeightsTensor(recurrentWeightsInfo, recurrentToCellWeightsTensorVector.data());

    const std::vector<uint8_t> recurrentToOutputWeightsTensorVector =
        {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
    armnn::ConstTensor recurrentToOutputWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToOutputWeightsTensorVector.data());

    const std::vector<int32_t> inputGateBiasTensorVector = {-7876, 13488, -726, 32839};
    armnn::ConstTensor inputGateBiasTensor(biasInfo, inputGateBiasTensorVector.data());

    const std::vector<int32_t> forgetGateBiasTensorVector = {9206, -46884, -11693, -38724};
    armnn::ConstTensor forgetGateBiasTensor(biasInfo, forgetGateBiasTensorVector.data());

    const std::vector<int32_t> cellBiasTensorVector = {39481, 48624, 48976, -21419};
    armnn::ConstTensor cellBiasTensor(biasInfo, cellBiasTensorVector.data());

    const std::vector<int32_t> outputGateBiasTensorVector = {-58999, -17050, -41852, -40538};
    armnn::ConstTensor outputGateBiasTensor(biasInfo, outputGateBiasTensorVector.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0);
    armnn::IConnectableLayer* const cellStateIn = net->AddInputLayer(1);
    armnn::IConnectableLayer* const outputStateIn = net->AddInputLayer(2);
    armnn::IConnectableLayer* const quantizedLstmLayer = net->AddQuantizedLstmLayer(data, "quantizedLstm");
    armnn::IConnectableLayer* const cellStateOut = net->AddOutputLayer(0);
    armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(1);
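    // The QuantizedLstm layer takes three inputs (input, cell state, output
    // state) and produces two outputs (cell state, output state); the slot
    // indices used here match the Connect() calls below.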

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize},
                                      armnn::DataType::QAsymmU8,
                                      inputOutputScale,
                                      inputOutputOffset);

    armnn::TensorInfo cellStateInTensorInfo({batchSize, outputSize},
                                            armnn::DataType::QSymmS16,
                                            cellStateScale,
                                            cellStateOffset);

    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize},
                                              armnn::DataType::QAsymmU8,
                                              inputOutputScale,
                                              inputOutputOffset);

    armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
                                             armnn::DataType::QSymmS16,
                                             cellStateScale,
                                             cellStateOffset);

    armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
                                       armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);

    // connect up
    // inputs
    Connect(inputLayer, quantizedLstmLayer, inputTensorInfo, 0, 0);
    Connect(cellStateIn, quantizedLstmLayer, cellStateInTensorInfo, 0, 1);
    Connect(outputStateIn, quantizedLstmLayer, outputStateInTensorInfo, 0, 2);

    // outputs
    Connect(quantizedLstmLayer, cellStateOut, cellStateOutTensorInfo, 0, 0);
    Connect(quantizedLstmLayer, outputStateOut, outputTensorInfo, 1, 0);
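    // Note: Connect(from, to, info, fromIndex, toIndex) (from the test utils)
    // links from's output slot fromIndex to to's input slot toIndex and sets
    // the given TensorInfo on that output slot.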

    return net;
}

// Checks if two values of an arithmetic type are close enough to each other
// with regard to a given tolerance value.
template<typename T>
typename std::enable_if<std::is_arithmetic<T>::value, bool>::type
IsCloseEnough(T value1, T value2, T tolerance)
{
    if (tolerance < 0)
    {
        throw armnn::InvalidArgumentException("Tolerance cannot be < 0");
    }

    T diff = value1 >= value2 ? static_cast<T>(value1 - value2) : static_cast<T>(value2 - value1);
    return diff <= tolerance;
}
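// For example, IsCloseEnough<int16_t>(1485, 1484, 2) is true, while
// IsCloseEnough<int16_t>(1485, 1482, 2) is false; a negative tolerance throws.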

} // anonymous namespace

void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    std::vector<uint8_t> inputVector = {166, 179, 50, 150};
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);

    std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
    armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
    boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);

    std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
    armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);

    std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
    armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
    boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);

    std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
    boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
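    // The {2, 2} input and {2, 4} expected output give batchSize = 2,
    // inputSize = 2 and outputSize = 4 in CreateQuantizedLstmNetwork().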

    // Builds up the structure of the network
    armnn::INetworkPtr net = CreateQuantizedLstmNetwork(input, outputStateOut);

    BOOST_TEST_CHECKPOINT("create a network");

    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    InputTensors inputTensors;
    inputTensors.reserve(3);

    // input
    inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
    inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), cellStateInVector.data())});
    inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), outputStateInVector.data())});
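    // LayerBindingIds 0-2 correspond to the AddInputLayer(0/1/2) calls in
    // CreateQuantizedLstmNetwork().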

    OutputTensors outputTensors;
    outputTensors.reserve(2);

    // output
    std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
    std::vector<uint8_t> outputStateOutResult(outputStateOutVector.size());
    outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), cellStateOutResult.data())});
    outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), outputStateOutResult.data())});
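    // Binding ids 0 and 1 match AddOutputLayer(0) (cell state) and
    // AddOutputLayer(1) (output state).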

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
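    // Tolerances are in quantization steps: +/-2 steps for the QSymmS16 cell
    // state and +/-1 step for the QAsymmU8 output state.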
    constexpr int16_t toleranceInt16 = 2;
    for (unsigned int i = 0u; i < cellStateOutResult.size(); ++i)
    {
        BOOST_CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
    }

    constexpr uint8_t toleranceUint8 = 1;
    for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
    {
        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
    }
}
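
A backend's end-to-end test suite can call this helper with its own backend id.
A minimal sketch, assuming the same Boost.Test setup this file uses (the
test-case name and backend choice are illustrative, not taken from this file):

// Hypothetical caller in a backend test file:
BOOST_AUTO_TEST_CASE(QuantizedLstmEndToEndTest)
{
    QuantizedLstmEndToEnd({armnn::Compute::CpuRef});
}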