ArmNN
 23.08
RefUnidirectionalSequenceLstmWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "Activation.hpp"
8 #include "Encoders.hpp"
9 #include "Decoders.hpp"
10 #include "Lstm.hpp"
11 #include "LstmUtils.hpp"
12 #include "RefWorkloadUtils.hpp"
13 
14 #include <armnnUtils/Permute.hpp>
15 
16 namespace armnn
17 {
18 
// Constructor (NOTE(review): the opening of the signature, original lines
// 19-20/22, is missing from this extraction -- per the cross-reference it is
// RefUnidirectionalSequenceLstmWorkload(const UnidirectionalSequenceLstmQueueDescriptor&,
// const WorkloadInfo&); confirm against the repository source).
// Copies every LSTM weight/bias ConstTensorHandle from the queue descriptor
// into ScopedTensorHandles owned by this workload via AssignScopedTensorHandle,
// so the weights stay valid for the workload's lifetime. Optional tensors
// (CIFG input gate, peephole, projection, layer-norm) may be null; the
// Execute() path only dereferences them when the matching descriptor flag is set.
21  const WorkloadInfo& info)
23  , m_InputToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToInputWeights))
24  , m_InputToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToForgetWeights))
25  , m_InputToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToCellWeights))
26  , m_InputToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_InputToOutputWeights))
27  , m_RecurrentToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToInputWeights))
28  , m_RecurrentToForgetWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToForgetWeights))
29  , m_RecurrentToCellWeightsTensor (AssignScopedTensorHandle(descriptor.m_RecurrentToCellWeights))
30  , m_RecurrentToOutputWeightsTensor(AssignScopedTensorHandle(descriptor.m_RecurrentToOutputWeights))
// Peephole connections (optional, only used when m_PeepholeEnabled).
31  , m_CellToInputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToInputWeights))
32  , m_CellToForgetWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToForgetWeights))
33  , m_CellToOutputWeightsTensor (AssignScopedTensorHandle(descriptor.m_CellToOutputWeights))
34  , m_InputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_InputGateBias))
35  , m_ForgetGateBiasTensor (AssignScopedTensorHandle(descriptor.m_ForgetGateBias))
36  , m_CellBiasTensor (AssignScopedTensorHandle(descriptor.m_CellBias))
37  , m_OutputGateBiasTensor (AssignScopedTensorHandle(descriptor.m_OutputGateBias))
// Projection layer (optional, only used when m_ProjectionEnabled).
38  , m_ProjectionWeightsTensor (AssignScopedTensorHandle(descriptor.m_ProjectionWeights))
39  , m_ProjectionBiasTensor (AssignScopedTensorHandle(descriptor.m_ProjectionBias))
// Layer normalisation weights (optional, only used when m_LayerNormEnabled).
40  , m_InputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_InputLayerNormWeights))
41  , m_ForgetLayerNormWeights (AssignScopedTensorHandle(descriptor.m_ForgetLayerNormWeights))
42  , m_CellLayerNormWeights (AssignScopedTensorHandle(descriptor.m_CellLayerNormWeights))
43  , m_OutputLayerNormWeights (AssignScopedTensorHandle(descriptor.m_OutputLayerNormWeights))
44 {}
45 
// IWorkload::Execute() override (NOTE(review): the body line, original line 48,
// is missing from this extraction; the cross-reference places Execute() const
// override at line 46 -- presumably it forwards to
// Execute(m_Data.m_Inputs, m_Data.m_Outputs); confirm against the repository source).
47 {
49 }
50 
// ExecuteAsync(ExecutionData&) (NOTE(review): the signature line, original
// line 51, is missing from this extraction). Unpacks the WorkingMemDescriptor
// carried opaquely in executionData.m_Data and dispatches to the two-vector
// Execute overload with that descriptor's input/output tensor handles.
52 {
53  WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
54  Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
55 }
56 
// Runs the whole unidirectional LSTM sequence on the CPU reference backend:
// builds float Encoders/Decoders over the weights and per-step scratch
// buffers, then invokes the per-timestep LSTM kernel maxTime times, sliding
// raw float pointers through the (time-major) input and output tensors.
//
// Tensor-handle layout (as indexed below):
//   inputs[0]  - input sequence          outputs[0] - output state out
//   inputs[1]  - output state in         outputs[1] - cell state out
//   inputs[2]  - cell state in           outputs[2] - output sequence
//
// NOTE(review): this extraction dropped several original source lines
// (72, 227, 243, 300); hedged comments mark each spot below.
57 void RefUnidirectionalSequenceLstmWorkload::Execute(std::vector<ITensorHandle*> inputs,
58  std::vector<ITensorHandle*> outputs) const
59 {
60  ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefUnidirectionalSequenceLstmWorkload_Execute");
61 
// Input/output infos are taken by value where the shape gets rewritten
// below; state infos are only read, hence const references.
62  TensorInfo inputInfo = GetTensorInfo(inputs[0]);
63  const TensorInfo& outputStateInfo = GetTensorInfo(inputs[1]);
64  const TensorInfo& cellStateInfo = GetTensorInfo(inputs[2]);
65  TensorInfo outputStateOutInfo = GetTensorInfo(outputs[0]);
66  TensorInfo cellStateOutInfo = GetTensorInfo(outputs[1]);
67  TensorInfo outputInfo = GetTensorInfo(outputs[2]);
68  TensorShape& inputShape = inputInfo.GetShape();
69  TensorShape& outputShape= outputInfo.GetShape();
// Raw float view of the mapped input tensor; the time loop assumes
// Float32 data throughout.
70  auto inputTensor = reinterpret_cast<float*>(inputs[0]->Map());
71 
// NOTE(review): the guarding `if` condition (original line 72) is missing
// from this extraction -- presumably `if (!m_Data.m_Parameters.m_TimeMajor)`
// (m_TimeMajor appears in the symbol index); confirm against the repository.
73  {
74  // Permute to time major
75  const PermutationVector& mappings = {1U, 0U, 2U};
// Copy out first, then permute back into the mapped buffer in place
// (Permute's src must not alias dst, hence the temporary vector).
76  std::vector<float> inputValue(inputTensor, inputTensor + inputInfo.GetNumElements());
77  inputShape = armnnUtils::Permuted(inputInfo.GetShape(), mappings);
78  inputInfo.SetShape(inputShape);
79  armnnUtils::Permute(inputShape, mappings, inputValue.data(), inputTensor, sizeof(float));
80 
// Output info is reshaped to match; the data itself is permuted back
// only after the time loop (see the tail of this function).
81  outputShape = armnnUtils::Permuted(outputInfo.GetShape(), mappings);
82  outputInfo.SetShape(outputShape);
83  }
// From here shapes are time-major: [maxTime, batchSize, inputSize/outputSize].
84  unsigned int maxTime = inputShape[0];
85  unsigned int batchSize = inputShape[1];
86  unsigned int outputSize = outputShape[2];
87  unsigned int inputSize = inputShape[2];
88 
// Per-timestep scratch shape: {batchSize, numUnits}, where numUnits is the
// cell-state width (cellStateInfo dim 1).
89  TensorInfo scratchInfo = outputInfo;
90  scratchInfo.SetShape({batchSize, cellStateInfo.GetShape()[1]});
91 
// inputGateScratchBuffer stays empty unless CIFG is disabled (resized below).
92  std::vector<float> inputGateScratchBuffer;
93  std::vector<float> cellScratchBuffer(scratchInfo.GetNumElements(), 0.);
94  std::vector<float> forgetGateScratchBuffer(scratchInfo.GetNumElements(), 0.);
95  std::vector<float> outputGateScratchBuffer(scratchInfo.GetNumElements(), 0.);
96 
// Host-side state buffers: each step writes its state here, and the next
// step reads state back from these same buffers (see end of the time loop).
97  std::vector<float> outputStateOutBuffer(outputStateInfo.GetNumElements(), 0.);
98  std::vector<float> cellStateOutBuffer(cellStateInfo.GetNumElements(), 0.);
99 
100  void* outputStateOutData = outputStateOutBuffer.data();
101  void* cellStateOutData = cellStateOutBuffer.data();
102 
// Encoders (writers) over the scratch buffers; input-gate one is only
// created when CIFG is off.
103  std::unique_ptr<Encoder<float>> inputGateScratch;
104  std::unique_ptr<Encoder<float>> cellScratch = MakeEncoder<float>(scratchInfo, cellScratchBuffer.data());
105  std::unique_ptr<Encoder<float>> forgetGateScratch = MakeEncoder<float>(scratchInfo, forgetGateScratchBuffer.data());
106  std::unique_ptr<Encoder<float>> outputGateScratch = MakeEncoder<float>(scratchInfo, outputGateScratchBuffer.data());
107 
// Matching decoders (readers) over the same scratch storage.
108  std::unique_ptr<Decoder<float>> inputGateScratchDecoder;
109  std::unique_ptr<Decoder<float>> cellScratchDecoder = MakeDecoder<float>(scratchInfo, cellScratchBuffer.data());
110  std::unique_ptr<Decoder<float>> forgetGateScratchDecoder = MakeDecoder<float>(scratchInfo,
111  forgetGateScratchBuffer.data());
112  std::unique_ptr<Decoder<float>> outputGateScratchDecoder = MakeDecoder<float>(scratchInfo,
113  outputGateScratchBuffer.data());
114 
115  const bool useCifg = m_Data.m_Parameters.m_CifgEnabled;
116  const bool usePeephole = m_Data.m_Parameters.m_PeepholeEnabled;
117  const bool useLayerNorm = m_Data.m_Parameters.m_LayerNormEnabled;
118 
// CIFG couples the input gate to the forget gate; the input-gate scratch
// only exists when CIFG is disabled.
119  if (!useCifg)
120  {
121  inputGateScratchBuffer.resize(scratchInfo.GetNumElements(), 0.);
122  inputGateScratch = MakeEncoder<float>(scratchInfo, inputGateScratchBuffer.data());
123  inputGateScratchDecoder = MakeDecoder<float>(scratchInfo, inputGateScratchBuffer.data());
124  }
125 
126  std::unique_ptr<Encoder<float>> outputStateOut = MakeEncoder<float>(outputStateInfo, outputStateOutData);
127  std::unique_ptr<Encoder<float>> cellStateOut = MakeEncoder<float>(cellStateInfo, cellStateOutData);
128  std::unique_ptr<Decoder<float>> cellStateOutDecoder = MakeDecoder<float>(cellStateInfo, cellStateOutData);
129 
// Single-timestep views: the per-step kernel sees 2D {batch, inputSize}
// and {batch, outputSize} slices of the 3D sequence tensors.
130  TensorInfo lstmInputInfo = inputInfo;
131  TensorShape batchInputShape = TensorShape({batchSize, inputSize});
132  lstmInputInfo.SetShape(batchInputShape);
133 
134  TensorInfo lstmOutputInfo = outputInfo;
135  lstmOutputInfo.SetShape({batchSize, outputSize});
136 
137  const TensorShape& inputToOutputWeightsShape = m_InputToOutputWeightsTensor->GetShape();
138  const TensorShape& recurrentToOutputWeightsShape = m_RecurrentToOutputWeightsTensor->GetShape();
// Recurrent weight dim 1 gives the per-step output width used to advance
// the output pointer each timestep.
139  unsigned int nOutput = recurrentToOutputWeightsShape[1];
140  auto outputStateInData = inputs[1]->Map();
141  std::unique_ptr<Decoder<float>> outputStateIn = MakeDecoder<float>(outputStateInfo, outputStateInData);
142 
143  auto cellStateInData = inputs[2]->Map();
144  std::unique_ptr<Decoder<float>> cellStateIn = MakeDecoder<float>(cellStateInfo, cellStateInData);
145 
// Sliding cursors into the (time-major) input and output sequences;
// rebuilt into fresh decoders/encoders after each step.
146  auto currentInputData = reinterpret_cast<float*>(inputs[0]->Map());
147  std::unique_ptr<Decoder<float>> inputData = MakeDecoder<float>(lstmInputInfo, currentInputData);
148  auto currentOutputData = reinterpret_cast<float*>(outputs[2]->Map());
149  std::unique_ptr<Encoder<float>> output = MakeEncoder<float>(lstmOutputInfo, currentOutputData);
150  std::unique_ptr<Decoder<float>> outputDecoder = MakeDecoder<float>(lstmOutputInfo, currentOutputData);
151 
// Decoders over the mandatory input-to-gate weights; the input-gate one is
// optional (CIFG) and filled in further down.
152  std::unique_ptr<Decoder<float>> inputToInputWeightsTensor;
153  std::unique_ptr<Decoder<float>> inputToForgetWeightsTensor = MakeDecoder<float>(
154  m_InputToForgetWeightsTensor->GetTensorInfo(), m_InputToForgetWeightsTensor->GetConstTensor<void>());
155  std::unique_ptr<Decoder<float>> inputToCellWeightsTensor = MakeDecoder<float>(
156  m_InputToCellWeightsTensor->GetTensorInfo(), m_InputToCellWeightsTensor->GetConstTensor<void>());
157  std::unique_ptr<Decoder<float>> inputToOutputWeightsTensor = MakeDecoder<float>(
158  m_InputToOutputWeightsTensor->GetTensorInfo(), m_InputToOutputWeightsTensor->GetConstTensor<void>());
159 
160  std::unique_ptr<Decoder<float>> recurrentToInputWeightsTensor;
161  std::unique_ptr<Decoder<float>> recurrentToForgetWeightsTensor = MakeDecoder<float>(
162  m_RecurrentToForgetWeightsTensor->GetTensorInfo(), m_RecurrentToForgetWeightsTensor->GetConstTensor<void>());
163  std::unique_ptr<Decoder<float>> recurrentToCellWeightsTensor = MakeDecoder<float>(
164  m_RecurrentToCellWeightsTensor->GetTensorInfo(), m_RecurrentToCellWeightsTensor->GetConstTensor<void>());
165  std::unique_ptr<Decoder<float>> recurrentToOutputWeightsTensor = MakeDecoder<float>(
166  m_RecurrentToOutputWeightsTensor->GetTensorInfo(), m_RecurrentToOutputWeightsTensor->GetConstTensor<void>());
167 
168  std::unique_ptr<Decoder<float>> inputGateBiasTensor;
169  std::unique_ptr<Decoder<float>> forgetGateBiasTensor = MakeDecoder<float>(
170  m_ForgetGateBiasTensor->GetTensorInfo(), m_ForgetGateBiasTensor->GetConstTensor<void>());
171  std::unique_ptr<Decoder<float>> cellBiasTensor = MakeDecoder<float>(
172  m_CellBiasTensor->GetTensorInfo(), m_CellBiasTensor->GetConstTensor<void>());
173  std::unique_ptr<Decoder<float>> outputGateBiasTensor = MakeDecoder<float>(
174  m_OutputGateBiasTensor->GetTensorInfo(), m_OutputGateBiasTensor->GetConstTensor<void>());
175 
// Optional decoders: left null unless the matching feature flag enables
// them below (LstmImpl receives them by reference either way).
176  std::unique_ptr<Decoder<float>> cellToInputWeightsTensor;
177  std::unique_ptr<Decoder<float>> cellToForgetWeightsTensor;
178  std::unique_ptr<Decoder<float>> cellToOutputWeightsTensor;
179 
180  std::unique_ptr<Decoder<float>> projectionWeightsTensor;
181  std::unique_ptr<Decoder<float>> projectionBiasTensor;
182 
183  std::unique_ptr<Decoder<float>> inputLayerNormWeights;
184  std::unique_ptr<Decoder<float>> forgetLayerNormWeights;
185  std::unique_ptr<Decoder<float>> cellLayerNormWeights;
186  std::unique_ptr<Decoder<float>> outputLayerNormWeights;
187 
188  if (useLayerNorm)
189  {
// No input-gate layer norm under CIFG (the input gate does not exist).
190  if (!useCifg)
191  {
192  inputLayerNormWeights = MakeDecoder<float>(
193  m_InputLayerNormWeights->GetTensorInfo(), m_InputLayerNormWeights->GetConstTensor<void>());
194  }
195  forgetLayerNormWeights = MakeDecoder<float>(
196  m_ForgetLayerNormWeights->GetTensorInfo(), m_ForgetLayerNormWeights->GetConstTensor<void>());
197  cellLayerNormWeights = MakeDecoder<float>(
198  m_CellLayerNormWeights->GetTensorInfo(), m_CellLayerNormWeights->GetConstTensor<void>());
199  outputLayerNormWeights = MakeDecoder<float>(
200  m_OutputLayerNormWeights->GetTensorInfo(), m_OutputLayerNormWeights->GetConstTensor<void>());
201  }
202 
// Input-gate weights/bias only exist when CIFG is disabled.
203  if (!useCifg)
204  {
205  inputToInputWeightsTensor = MakeDecoder<float>(
206  m_InputToInputWeightsTensor->GetTensorInfo(), m_InputToInputWeightsTensor->GetConstTensor<void>());
207  inputGateBiasTensor = MakeDecoder<float>(
208  m_InputGateBiasTensor->GetTensorInfo(), m_InputGateBiasTensor->GetConstTensor<void>());
209  recurrentToInputWeightsTensor = MakeDecoder<float>(
210  m_RecurrentToInputWeightsTensor->GetTensorInfo(), m_RecurrentToInputWeightsTensor->GetConstTensor<void>());
211  }
212 
213  if (usePeephole)
214  {
215  cellToForgetWeightsTensor = MakeDecoder<float>(
216  m_CellToForgetWeightsTensor->GetTensorInfo(), m_CellToForgetWeightsTensor->GetConstTensor<void>());
217  cellToOutputWeightsTensor = MakeDecoder<float>(
218  m_CellToOutputWeightsTensor->GetTensorInfo(), m_CellToOutputWeightsTensor->GetConstTensor<void>());
219  }
220 
// Cell-to-input peephole needs both an input gate (no CIFG) and peephole.
221  if (!useCifg && usePeephole)
222  {
223  cellToInputWeightsTensor = MakeDecoder<float>(
224  m_CellToInputWeightsTensor->GetTensorInfo(), m_CellToInputWeightsTensor->GetConstTensor<void>());
225  }
226 
// NOTE(review): the guarding `if` condition (original line 227) is missing
// from this extraction -- presumably `if (m_Data.m_Parameters.m_ProjectionEnabled)`
// (m_ProjectionEnabled appears in the symbol index); confirm against the repository.
228  {
229  projectionWeightsTensor = MakeDecoder<float>(
230  m_ProjectionWeightsTensor->GetTensorInfo(), m_ProjectionWeightsTensor->GetConstTensor<void>());
// Projection bias is optional even when projection itself is enabled.
231  if (m_ProjectionBiasTensor)
232  {
233  projectionBiasTensor = MakeDecoder<float>(
234  m_ProjectionBiasTensor->GetTensorInfo(), m_ProjectionBiasTensor->GetConstTensor<void>());
235  }
236  }
237 
// Flat element strides for one timestep of input and output.
238  unsigned int batchInputSize = batchSize * inputSize;
239  unsigned int batchOutputSize = batchSize * nOutput;
240 
// Main recurrence: one LstmImpl call per timestep, then slide the input
// and output cursors and feed this step's state into the next step.
241  for (unsigned int t = 0; t < maxTime; ++t)
242  {
// NOTE(review): the call head (original line 243) is missing from this
// extraction -- presumably `LstmImpl(m_Data.m_Parameters,` given the
// LstmImpl signature in the symbol index; confirm against the repository.
244  lstmInputInfo,
245  lstmOutputInfo,
246  inputToOutputWeightsShape,
247  recurrentToOutputWeightsShape,
248  inputData,
249  outputStateIn,
250  cellStateIn,
251  outputStateOut,
252  cellStateOut,
253  output,
254  cellStateOutDecoder,
255  outputDecoder,
256  inputToInputWeightsTensor,
257  inputToForgetWeightsTensor,
258  inputToCellWeightsTensor,
259  inputToOutputWeightsTensor,
260  recurrentToInputWeightsTensor,
261  recurrentToForgetWeightsTensor,
262  recurrentToCellWeightsTensor,
263  recurrentToOutputWeightsTensor,
264  cellToInputWeightsTensor,
265  cellToForgetWeightsTensor,
266  cellToOutputWeightsTensor,
267  inputGateBiasTensor,
268  forgetGateBiasTensor,
269  cellBiasTensor,
270  outputGateBiasTensor,
271  projectionWeightsTensor,
272  projectionBiasTensor,
273  inputLayerNormWeights,
274  forgetLayerNormWeights,
275  cellLayerNormWeights,
276  outputLayerNormWeights,
277  inputGateScratch,
278  cellScratch,
279  forgetGateScratch,
280  outputGateScratch,
281  inputGateScratchDecoder,
282  cellScratchDecoder,
283  forgetGateScratchDecoder,
284  outputGateScratchDecoder,
285  m_LayerNormEpsilon);
286 
// Advance one timestep and rebuild the decoders/encoders over the new
// positions (the unique_ptrs are replaced, not mutated in place).
287  currentInputData += batchInputSize;
288  inputData = MakeDecoder<float>(lstmInputInfo, currentInputData);
289  currentOutputData += batchOutputSize;
290  output = MakeEncoder<float>(lstmOutputInfo, currentOutputData);
291  outputDecoder = MakeDecoder<float>(lstmOutputInfo, currentOutputData);
292 
293  // Assign output state out to the next output state in
294  outputStateIn = MakeDecoder<float>(outputStateInfo, outputStateOutData);
295 
296  // Assign cell state out to the next cell state in
297  cellStateIn = MakeDecoder<float>(cellStateInfo, cellStateOutData);
298  }
299 
// NOTE(review): the guarding `if` condition (original line 300) is missing
// from this extraction -- presumably the same `!m_Data.m_Parameters.m_TimeMajor`
// check as the permute at the top; confirm against the repository.
301  {
302  // Permute Output back to batch major
303  const PermutationVector& mappings = {1U, 0U, 2U};
304  auto outputData = reinterpret_cast<float*>(outputs[2]->Map());
// Same copy-then-permute-in-place pattern as the input permute above.
305  std::vector<float> outputValue(outputData, outputData + outputInfo.GetNumElements());
306  outputShape = armnnUtils::Permuted(outputInfo.GetShape(), mappings);
307  outputInfo.SetShape(outputShape);
308  armnnUtils::Permute(outputShape, mappings, outputValue.data(), outputData, sizeof(float));
309  }
310 }
311 
312 } //namespace armnn
armnn::TensorInfo::GetNumElements
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
armnn::LstmDescriptor::m_TimeMajor
bool m_TimeMajor
Enable/disable time major.
Definition: Descriptors.hpp:1133
Lstm.hpp
armnn::experimental::ExecutionData::m_Data
void * m_Data
Definition: ExecutionData.hpp:16
armnn::TensorInfo
Definition: Tensor.hpp:152
AssignScopedTensorHandle
std::unique_ptr< armnn::ScopedTensorHandle > AssignScopedTensorHandle(const armnn::ConstTensorHandle *ptr)
Definition: LstmUtils.cpp:299
armnn::RefUnidirectionalSequenceLstmWorkload::ExecuteAsync
void ExecuteAsync(ExecutionData &executionData) override
Definition: RefUnidirectionalSequenceLstmWorkload.cpp:51
ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: RefWorkloadUtils.hpp:22
armnnUtils::Permute
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
armnnUtils::Permuted
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1127
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LstmImpl
void LstmImpl(const LstmDescriptor &descriptor, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const TensorShape &inputToOutputWeightsShape, const TensorShape &recurrentToOutputWeightsShape, std::unique_ptr< Decoder< float >> &inputData, std::unique_ptr< Decoder< float >> &outputStateIn, std::unique_ptr< Decoder< float >> &cellStateIn, std::unique_ptr< Encoder< float >> &outputStateOut, std::unique_ptr< Encoder< float >> &cellStateOut, std::unique_ptr< Encoder< float >> &output, std::unique_ptr< Decoder< float >> &cellStateOutDecoder, std::unique_ptr< Decoder< float >> &outputDecoder, std::unique_ptr< Decoder< float >> &inputToInputWeightsTensor, std::unique_ptr< Decoder< float >> &inputToForgetWeightsTensor, std::unique_ptr< Decoder< float >> &inputToCellWeightsTensor, std::unique_ptr< Decoder< float >> &inputToOutputWeightsTensor, std::unique_ptr< Decoder< float >> &recurrentToInputWeightsTensor, std::unique_ptr< Decoder< float >> &recurrentToForgetWeightsTensor, std::unique_ptr< Decoder< float >> &recurrentToCellWeightsTensor, std::unique_ptr< Decoder< float >> &recurrentToOutputWeightsTensor, std::unique_ptr< Decoder< float >> &cellToInputWeightsTensor, std::unique_ptr< Decoder< float >> &cellToForgetWeightsTensor, std::unique_ptr< Decoder< float >> &cellToOutputWeightsTensor, std::unique_ptr< Decoder< float >> &inputGateBiasTensor, std::unique_ptr< Decoder< float >> &forgetGateBiasTensor, std::unique_ptr< Decoder< float >> &cellBiasTensor, std::unique_ptr< Decoder< float >> &outputGateBiasTensor, std::unique_ptr< Decoder< float >> &projectionWeightsTensor, std::unique_ptr< Decoder< float >> &projectionBiasTensor, std::unique_ptr< Decoder< float >> &inputLayerNormWeights, std::unique_ptr< Decoder< float >> &forgetLayerNormWeights, std::unique_ptr< Decoder< float >> &cellLayerNormWeights, std::unique_ptr< Decoder< float >> &outputLayerNormWeights, std::unique_ptr< Encoder< float >> &inputGateScratch, std::unique_ptr< Encoder< float >> &cellScratch, 
std::unique_ptr< Encoder< float >> &forgetGateScratch, std::unique_ptr< Encoder< float >> &outputGateScratch, std::unique_ptr< Decoder< float >> &inputGateScratchDecoder, std::unique_ptr< Decoder< float >> &cellScratchDecoder, std::unique_ptr< Decoder< float >> &forgetGateScratchDecoder, std::unique_ptr< Decoder< float >> &outputGateScratchDecoder, float layerNormEpsilon)
Definition: Lstm.cpp:13
armnn::QueueDescriptorWithParameters::m_Parameters
LayerDescriptor m_Parameters
Definition: WorkloadData.hpp:66
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
armnn::RefUnidirectionalSequenceLstmWorkload::RefUnidirectionalSequenceLstmWorkload
RefUnidirectionalSequenceLstmWorkload(const UnidirectionalSequenceLstmQueueDescriptor &descriptor, const WorkloadInfo &info)
Definition: RefUnidirectionalSequenceLstmWorkload.cpp:19
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition: RefWorkloadUtils.hpp:33
Activation.hpp
armnn::PermutationVector
Definition: Types.hpp:308
Permute.hpp
armnn::BoostLogSeverityMapping::info
@ info
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
armnn::RefUnidirectionalSequenceLstmWorkload::Execute
void Execute() const override
Definition: RefUnidirectionalSequenceLstmWorkload.cpp:46
RefWorkloadUtils.hpp
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1125
armnn::BaseWorkload< UnidirectionalSequenceLstmQueueDescriptor >::m_Data
UnidirectionalSequenceLstmQueueDescriptor m_Data
Definition: Workload.hpp:89
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1131
Decoders.hpp
armnn::experimental::WorkingMemDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkingMemDescriptor.hpp:20
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::experimental::WorkingMemDescriptor
Definition: WorkingMemDescriptor.hpp:18
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1129
armnn::UnidirectionalSequenceLstmQueueDescriptor
Definition: WorkloadData.hpp:691
LstmUtils.hpp
Encoders.hpp
armnn::RefBaseWorkload
Definition: RefBaseWorkload.hpp:13
RefUnidirectionalSequenceLstmWorkload.hpp
armnn::experimental::WorkingMemDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkingMemDescriptor.hpp:21
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::experimental::ExecutionData
Definition: ExecutionData.hpp:14