ArmNN 20.08
NeonLstmFloatWorkload.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include "arm_compute/graph/Tensor.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"

namespace armnn
{

class NeonLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
{
public:
    NeonLstmFloatWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
    virtual void Execute() const override;

private:
    mutable arm_compute::NELSTMLayer m_LstmLayer;

    std::unique_ptr<arm_compute::Tensor> m_InputToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToCellWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_RecurrentToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToInputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToForgetWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellToOutputWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_InputGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_ForgetGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputGateBiasTensor;
    std::unique_ptr<arm_compute::Tensor> m_ProjectionWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_ProjectionBiasTensor;

    std::unique_ptr<arm_compute::Tensor> m_ScratchBuffer;

    std::unique_ptr<arm_compute::Tensor> m_InputLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_ForgetLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_CellLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::Tensor> m_OutputLayerNormWeightsTensor;

    void FreeUnusedTensors();
};

arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
                                                  const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                                                  const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                                                  const TensorInfo& output, const LstmDescriptor& descriptor,
                                                  const LstmInputParamsInfo& paramsInfo);

} //namespace armnn
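
The declarations above are normally used in two steps: NeonLstmFloatWorkloadValidate is called first to check whether Arm Compute Library's NELSTMLayer supports a given tensor and descriptor combination, and only if it succeeds is a NeonLstmFloatWorkload constructed and executed (in ArmNN itself this check is usually reached through the backend's layer-support interface rather than called directly). The sketch below illustrates only the validation call. It is not part of the header: the include path, the tensor shapes, the CIFG-only parameter set and the activation code are illustrative assumptions, not values taken from this file.

// Minimal validation sketch (assumptions: ArmNN 20.08 public headers are on the
// include path, this internal backend header is reachable, and the shapes and
// CIFG-only configuration below are purely illustrative).
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonLstmFloatWorkload.hpp" // internal header; the actual path depends on the build tree

#include <iostream>

int main()
{
    using namespace armnn;

    const unsigned int batch = 2, inputSize = 2, numUnits = 4;
    const unsigned int outputSize = numUnits; // no projection layer, so output width == numUnits
    const DataType dt = DataType::Float32;

    // Tensors exchanged with the layer at runtime.
    TensorInfo input({batch, inputSize}, dt);
    TensorInfo outputStateIn({batch, outputSize}, dt);
    TensorInfo cellStateIn({batch, numUnits}, dt);
    TensorInfo scratchBuffer({batch, numUnits * 3}, dt); // 3 * numUnits because CIFG is enabled
    TensorInfo outputStateOut({batch, outputSize}, dt);
    TensorInfo cellStateOut({batch, numUnits}, dt);
    TensorInfo output({batch, outputSize}, dt);

    // Mandatory weights and biases only (CIFG enabled, no peephole, projection or layer norm).
    TensorInfo inputWeights({numUnits, inputSize}, dt);
    TensorInfo recurrentWeights({numUnits, outputSize}, dt);
    TensorInfo bias({numUnits}, dt);

    LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &inputWeights;
    paramsInfo.m_InputToCellWeights       = &inputWeights;
    paramsInfo.m_InputToOutputWeights     = &inputWeights;
    paramsInfo.m_RecurrentToForgetWeights = &recurrentWeights;
    paramsInfo.m_RecurrentToCellWeights   = &recurrentWeights;
    paramsInfo.m_RecurrentToOutputWeights = &recurrentWeights;
    paramsInfo.m_ForgetGateBias           = &bias;
    paramsInfo.m_CellBias                 = &bias;
    paramsInfo.m_OutputGateBias           = &bias;

    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc    = 4;    // TfLite-style activation encoding; 4 is commonly tanh
    descriptor.m_ClippingThresCell = 0.0f;
    descriptor.m_ClippingThresProj = 0.0f;
    descriptor.m_CifgEnabled       = true;
    descriptor.m_PeepholeEnabled   = false;
    descriptor.m_ProjectionEnabled = false;
    descriptor.m_LayerNormEnabled  = false;

    // Ask the NEON backend whether it can run this configuration before
    // committing to creating a NeonLstmFloatWorkload.
    arm_compute::Status status = NeonLstmFloatWorkloadValidate(input, outputStateIn, cellStateIn, scratchBuffer,
                                                               outputStateOut, cellStateOut, output,
                                                               descriptor, paramsInfo);

    if (status)
    {
        std::cout << "LSTM configuration supported by the NEON backend" << std::endl;
    }
    else
    {
        std::cout << "Not supported: " << status.error_description() << std::endl;
    }
    return 0;
}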