path: root/src/armnn/layers/QuantizedLstmLayer.cpp
author     Finn Williams <Finn.Williams@arm.com>   2021-02-09 15:56:23 +0000
committer  Finn Williams <Finn.Williams@arm.com>   2021-02-12 13:10:20 +0000
commit     b454c5c65efb238c130b042ace390b2bc7f0bf75 (patch)
tree       d6681d0abf416b3cc280bc3bb70e7d55dfd40a0d /src/armnn/layers/QuantizedLstmLayer.cpp
parent     8eae955f665f371b0a2c7c1a06e8ba442afa2298 (diff)
download   armnn-b454c5c65efb238c130b042ace390b2bc7f0bf75.tar.gz
IVGCVSW-4893 Refactor ILayerVisitor using unified interface strategy.
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: Id7bc8255a8e3f9e5aac65d510bec8a559bf37246
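
For context, the refactor replaces the per-layer ILayerVisitor callbacks with a single IStrategy entry point that every layer forwards its descriptor and constant tensors to. The sketch below shows what that unified interface is assumed to look like, inferred only from the call site added in this patch (strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName())); the parameter names, the header location, and the defaulted binding id are assumptions, not taken from this diff.

// Sketch only: the interface shape is inferred from the call site in the diff below;
// the actual declaration in armnn (include/armnn/IStrategy.hpp is assumed) may differ.
class IStrategy
{
public:
    virtual void ExecuteStrategy(const IConnectableLayer* layer,
                                 const BaseDescriptor& descriptor,
                                 const std::vector<ConstTensor>& constants,
                                 const char* name,
                                 const LayerBindingId id = 0) = 0;

protected:
    IStrategy() {}
    virtual ~IStrategy() {}
};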
Diffstat (limited to 'src/armnn/layers/QuantizedLstmLayer.cpp')
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp  87
1 files changed, 87 insertions, 0 deletions
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 624e443064..578d9eb137 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -291,4 +291,91 @@ void QuantizedLstmLayer::Accept(ILayerVisitor& visitor) const
visitor.VisitQuantizedLstmLayer(this, inputParams, GetName());
}
+void QuantizedLstmLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ std::vector<ConstTensor> constTensors;
+
+ // InputToX weight tensors
+ if (m_QuantizedLstmParameters.m_InputToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToInputWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToForgetWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToCellWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputToOutputWeights->Map(true)));
+ }
+
+ // RecurrentToX weight tensors
+ if (m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToInputWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToCellWeights->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(
+ m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Map(true)));
+ }
+
+ // Bias tensors
+ if (m_QuantizedLstmParameters.m_InputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_InputGateBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_ForgetGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_ForgetGateBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_CellBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_CellBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_CellBias->Map(true)));
+ }
+
+ if (m_QuantizedLstmParameters.m_OutputGateBias != nullptr)
+ {
+ constTensors.emplace_back(ConstTensor(m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo(),
+ m_QuantizedLstmParameters.m_OutputGateBias->Map(true)));
+ }
+
+
+ strategy.ExecuteStrategy(this, BaseDescriptor(), constTensors, GetName());
+}
+
} // namespace armnn
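
As a usage illustration, a minimal strategy that simply reports the constants each layer hands over through the new ExecuteStrategy path could look like the sketch below. The class name LogConstantsStrategy and the include path are hypothetical; only the callback shape mirrors the call added in this diff.

#include <iostream>
#include <vector>

#include <armnn/IStrategy.hpp>   // assumed header location for the unified IStrategy interface

// Hypothetical example, not part of this patch: a strategy that logs how many constant
// tensors (weights and biases) each layer exposes when the graph is walked.
struct LogConstantsStrategy : public armnn::IStrategy
{
    void ExecuteStrategy(const armnn::IConnectableLayer* /*layer*/,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId /*id*/) override
    {
        std::cout << name << " exposes " << constants.size() << " constant tensor(s)\n";
    }
};

For a QuantizedLstmLayer with all twelve parameters populated, constants.size() would be 12, in the order the layer pushes them above: the four input-to-gate weights, the four recurrent weights, then the four gate biases; any parameter left null is simply skipped.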