diff options
author | Matteo Martincigh <matteo.martincigh@arm.com> | 2018-11-14 12:39:55 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2018-11-16 09:05:24 +0000 |
commit | a65b7aeafc0ef6acf40e4a8a6d36206bf53d717c (patch) | |
tree | d62257a911f3a4a4ed99243d4860a2453e95ec98 /src/backends/reference/workloads/RefLstmFloat32Workload.hpp | |
parent | 74ba3dc7113e51cf11ab772ee1eb030c07a7dda5 (diff) | |
download | armnn-a65b7aeafc0ef6acf40e4a8a6d36206bf53d717c.tar.gz |
IVGCVSW-2092 Port LSTMCell::Eval to ArmNN
* Ported Google's LSTM implementation to RefLstmFloat32Workload
* Fixed the code throughout because of an error in the docs around the
scratch buffer size
* Updated IsLstmSupported
* Added the unit tests
!android-nn-driver:127
Change-Id: I5577b7e39ca52df1a7f102a9b437df6aa99520b6
Diffstat (limited to 'src/backends/reference/workloads/RefLstmFloat32Workload.hpp')
-rw-r--r-- | src/backends/reference/workloads/RefLstmFloat32Workload.hpp | 24 |
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/src/backends/reference/workloads/RefLstmFloat32Workload.hpp b/src/backends/reference/workloads/RefLstmFloat32Workload.hpp index 1f634d3ca1..a2dead8b9c 100644 --- a/src/backends/reference/workloads/RefLstmFloat32Workload.hpp +++ b/src/backends/reference/workloads/RefLstmFloat32Workload.hpp @@ -5,6 +5,8 @@ #pragma once +#include <armnn/TypesUtils.hpp> + #include <backendsCommon/Workload.hpp> #include <backendsCommon/WorkloadData.hpp> @@ -14,8 +16,28 @@ namespace armnn class RefLstmFloat32Workload : public Float32Workload<LstmQueueDescriptor> { public: - using Float32Workload<LstmQueueDescriptor>::Float32Workload; + explicit RefLstmFloat32Workload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor; + std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor; }; } //namespace armnn |