ArmNN 22.02
QLstmEndToEndTestImpl.cpp File Reference
#include "QLstmEndToEndTestImpl.hpp"
#include <CommonTestUtils.hpp>
#include "EndToEndTestImpl.hpp"
#include <armnn/INetwork.hpp>
#include <armnn/LstmParams.hpp>
#include <doctest/doctest.h>


Functions

void QLstmEndToEnd (const std::vector< armnn::BackendId > &backends)
 

Function Documentation

◆ QLstmEndToEnd()

void QLstmEndToEnd (const std::vector< armnn::BackendId > & backends)

Definition at line 36 of file QLstmEndToEndTestImpl.cpp.

References Connect(), INetwork::Create(), QLstmDescriptor::m_CellClip, QLstmDescriptor::m_CellIntermediateScale, QLstmDescriptor::m_CifgEnabled, QLstmDescriptor::m_ForgetIntermediateScale, QLstmDescriptor::m_HiddenStateScale, QLstmDescriptor::m_HiddenStateZeroPoint, QLstmDescriptor::m_InputIntermediateScale, LstmInputParams::m_InputToForgetWeights, QLstmDescriptor::m_LayerNormEnabled, QLstmDescriptor::m_OutputIntermediateScale, QLstmDescriptor::m_PeepholeEnabled, QLstmDescriptor::m_ProjectionClip, QLstmDescriptor::m_ProjectionEnabled, armnn::Optimize(), armnn::QAsymmS8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by TEST_SUITE().
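
Each backend exercises this implementation from a doctest TEST_SUITE (see "Referenced by" above). A minimal sketch of such a caller, assuming the reference backend; the suite and test case names here are illustrative, not the actual backend test files:

#include "QLstmEndToEndTestImpl.hpp"
#include <doctest/doctest.h>

// Illustrative caller: each backend's end-to-end test file registers a
// case like this, passing its own backend preference list.
TEST_SUITE("RefEndToEnd")
{
TEST_CASE("RefQLstmEndToEndTest")
{
    QLstmEndToEnd({armnn::Compute::CpuRef});
}
}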

void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
    const unsigned int numBatches = 2;
    const unsigned int inputSize  = 5;
    const unsigned int outputSize = 4;
    const unsigned int numUnits   = 4;

    bool cifgEnabled       = true;
    bool peepholeEnabled   = false;
    bool projectionEnabled = false;
    bool layerNormEnabled  = true;

    // Scale/Offset quantization info
    const float inputScale    = 0.0078125f;
    const int32_t inputOffset = 0;

    const int32_t hiddenStateZeroPoint = 0;
    const float hiddenStateScale       = 0.007f;

    // if (!projectionEnabled) outputScale == hiddenStateScale
    const float outputScale    = hiddenStateScale;
    const int32_t outputOffset = hiddenStateZeroPoint;

    const float cellStateScale    = 3.05176e-05f;
    const int32_t cellStateOffset = 0;

    const float weightsScale    = 0.00784314f;
    const int32_t weightsOffset = 0;

    const float layerNormScale    = 3.05182e-05f;
    const int32_t layerNormOffset = 0;

    const float biasScale    = layerNormScale / 1024;
    const int32_t biasOffset = 0;

    const float inputIntermediateScale  = 0.007059f;
    const float forgetIntermediateScale = 0.007812f;
    const float cellIntermediateScale   = inputIntermediateScale;
    const float outputIntermediateScale = forgetIntermediateScale;

    const float cellClip       = 0.0f;
    const float projectionClip = 0.0f;

    // Weights and bias tensor info
    const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                             armnn::DataType::QSymmS8,
                                             weightsScale,
                                             weightsOffset,
                                             true);

    const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                                 armnn::DataType::QSymmS8,
                                                 weightsScale,
                                                 weightsOffset,
                                                 true);

    const armnn::TensorInfo biasInfo({outputSize},
                                     armnn::DataType::Signed32,
                                     biasScale,
                                     biasOffset,
                                     true);

    const armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                                 armnn::DataType::QSymmS16,
                                                 layerNormScale,
                                                 layerNormOffset,
                                                 true);

    // Mandatory params
    const std::vector<int8_t> inputToForgetWeightsVector =
        {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64};
    const std::vector<int8_t> inputToCellWeightsTensorVector =
        {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77};
    const std::vector<int8_t> inputToOutputWeightsTensorVector =
        {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51};

    armnn::ConstTensor inputToForgetWeightsTensor(inputWeightsInfo, inputToForgetWeightsVector.data());
    armnn::ConstTensor inputToCellWeightsTensor(inputWeightsInfo, inputToCellWeightsTensorVector.data());
    armnn::ConstTensor inputToOutputWeightsTensor(inputWeightsInfo, inputToOutputWeightsTensorVector.data());

    const std::vector<int8_t> recurrentToForgetWeightsTensorVector =
        {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51};
    const std::vector<int8_t> recurrentToCellWeightsTensorVector =
        {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64};
    const std::vector<int8_t> recurrentToOutputWeightsTensorVector =
        {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38};

    armnn::ConstTensor recurrentToForgetWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToForgetWeightsTensorVector.data());
    armnn::ConstTensor recurrentToCellWeightsTensor(recurrentWeightsInfo,
                                                    recurrentToCellWeightsTensorVector.data());
    armnn::ConstTensor recurrentToOutputWeightsTensor(recurrentWeightsInfo,
                                                      recurrentToOutputWeightsTensorVector.data());

    const std::vector<int32_t> forgetGateBiasTensorVector = {2147484, -6442451, -4294968, 2147484};
    const std::vector<int32_t> cellBiasTensorVector = {-1073742, 15461883, 5368709, 1717987};
    const std::vector<int32_t> outputGateBiasTensorVector = {1073742, -214748, 4294968, 2147484};

    armnn::ConstTensor forgetGateBiasTensor(biasInfo, forgetGateBiasTensorVector.data());
    armnn::ConstTensor cellBiasTensor(biasInfo, cellBiasTensorVector.data());
    armnn::ConstTensor outputGateBiasTensor(biasInfo, outputGateBiasTensorVector.data());

    // Layer Norm
    const std::vector<int16_t> forgetLayerNormWeightsVector = {6553, 6553, 13107, 9830};
    const std::vector<int16_t> cellLayerNormWeightsVector = {22937, 6553, 9830, 26214};
    const std::vector<int16_t> outputLayerNormWeightsVector = {19660, 6553, 6553, 16384};

    armnn::ConstTensor forgetLayerNormWeights(layerNormWeightsInfo, forgetLayerNormWeightsVector.data());
    armnn::ConstTensor cellLayerNormWeights(layerNormWeightsInfo, cellLayerNormWeightsVector.data());
    armnn::ConstTensor outputLayerNormWeights(layerNormWeightsInfo, outputLayerNormWeightsVector.data());

    // Set up params
    armnn::LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    params.m_InputToCellWeights = &inputToCellWeightsTensor;
    params.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    params.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    params.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    params.m_ForgetGateBias = &forgetGateBiasTensor;
    params.m_CellBias = &cellBiasTensor;
    params.m_OutputGateBias = &outputGateBiasTensor;

    params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
    params.m_CellLayerNormWeights = &cellLayerNormWeights;
    params.m_OutputLayerNormWeights = &outputLayerNormWeights;

    QLstmDescriptor descriptor;
    descriptor.m_CifgEnabled = cifgEnabled;
    descriptor.m_PeepholeEnabled = peepholeEnabled;
    descriptor.m_ProjectionEnabled = projectionEnabled;
    descriptor.m_LayerNormEnabled = layerNormEnabled;

    descriptor.m_CellClip = cellClip;
    descriptor.m_ProjectionClip = projectionClip;

    descriptor.m_HiddenStateZeroPoint = hiddenStateZeroPoint;
    descriptor.m_HiddenStateScale = hiddenStateScale;

    descriptor.m_InputIntermediateScale = inputIntermediateScale;
    descriptor.m_ForgetIntermediateScale = forgetIntermediateScale;
    descriptor.m_CellIntermediateScale = cellIntermediateScale;
    descriptor.m_OutputIntermediateScale = outputIntermediateScale;

    // Input/Output tensor info
    const armnn::TensorInfo inputInfo({numBatches, inputSize},
                                      armnn::DataType::QAsymmS8,
                                      inputScale,
                                      inputOffset,
                                      true);

    const armnn::TensorInfo cellStateInfo({numBatches, numUnits},
                                          armnn::DataType::QSymmS16,
                                          cellStateScale,
                                          cellStateOffset,
                                          true);

    const armnn::TensorInfo outputStateInfo({numBatches, outputSize},
                                            armnn::DataType::QAsymmS8,
                                            outputScale,
                                            outputOffset,
                                            true);

    // Input tensor data
    const std::vector<int8_t> inputVector = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
    const std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
    const std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};

    // Expected output tensor data
    const std::vector<int8_t> outputStateOutVector = {-15, 21, 14, 20, -15, 15, 5, 27};
    const std::vector<int16_t> cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149};
    const std::vector<int8_t> outputVector = {-15, 21, 14, 20, -15, 15, 5, 27};

    // Build network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* const input = net->AddInputLayer(0);
    armnn::IConnectableLayer* const outputStateIn = net->AddInputLayer(1);
    armnn::IConnectableLayer* const cellStateIn = net->AddInputLayer(2);

    armnn::IConnectableLayer* const qLstmLayer = net->AddQLstmLayer(descriptor, params, "qLstm");

    armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(0);
    armnn::IConnectableLayer* const cellStateOut = net->AddOutputLayer(1);
    armnn::IConnectableLayer* const output = net->AddOutputLayer(2);

    // Connect input/output slots
    Connect(input, qLstmLayer, inputInfo, 0, 0);
    Connect(outputStateIn, qLstmLayer, outputStateInfo, 0, 1);
    Connect(cellStateIn, qLstmLayer, cellStateInfo, 0, 2);

    Connect(qLstmLayer, outputStateOut, outputStateInfo, 0, 0);
    Connect(qLstmLayer, cellStateOut, cellStateInfo, 1, 0);
    Connect(qLstmLayer, output, outputStateInfo, 2, 0);

    // Create runtime
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load the network into the runtime
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Push back input tensors
    InputTensors inputTensors;
    inputTensors.reserve(3);

    inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())});
    inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), outputStateInVector.data())});
    inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), cellStateInVector.data())});

    // Push back output tensors
    OutputTensors outputTensors;
    outputTensors.reserve(3);

    std::vector<int8_t> outputStateOutResult(outputStateOutVector.size());
    std::vector<int16_t> cellStateOutResult(cellStateOutVector.size());
    std::vector<int8_t> outputResult(outputStateOutVector.size());

    outputTensors.push_back({0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputStateOutResult.data())});
    outputTensors.push_back({1, Tensor(runtime->GetOutputTensorInfo(netId, 1), cellStateOutResult.data())});
    outputTensors.push_back({2, Tensor(runtime->GetOutputTensorInfo(netId, 2), outputResult.data())});

    // Execute inference
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Compare results against the expected values, allowing an int8 tolerance of 1
    constexpr int8_t toleranceInt8 = 1;
    for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
    {
        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
    }

    for (unsigned int i = 0u; i < outputResult.size(); ++i)
    {
        CHECK(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
    }
}
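
The scale/offset constants above follow the standard affine quantization mapping real = scale * (quantized - offset). For instance, inputScale = 0.0078125 = 1/128, so the QAsymmS8 input values span roughly [-1.0, 1.0), and the first input element 90 represents 90 / 128 = 0.703125. A minimal standalone sketch of that mapping (Dequantize is a hypothetical helper, not part of the test):

#include <cstdint>
#include <iostream>

// Hypothetical helper: the affine mapping implied by the TensorInfo
// scale/offset pairs used in the test above.
float Dequantize(int8_t quantized, float scale, int32_t offset)
{
    return scale * static_cast<float>(quantized - offset);
}

int main()
{
    const float inputScale    = 0.0078125f; // 1/128
    const int32_t inputOffset = 0;

    // First element of inputVector: 90 -> 0.703125
    std::cout << Dequantize(90, inputScale, inputOffset) << "\n";
    return 0;
}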