path: root/src/armnn
author     Matteo Martincigh <matteo.martincigh@arm.com>  2018-11-14 12:39:55 +0000
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2018-11-16 09:05:24 +0000
commit     a65b7aeafc0ef6acf40e4a8a6d36206bf53d717c (patch)
tree       d62257a911f3a4a4ed99243d4860a2453e95ec98 /src/armnn
parent     74ba3dc7113e51cf11ab772ee1eb030c07a7dda5 (diff)
download   armnn-a65b7aeafc0ef6acf40e4a8a6d36206bf53d717c.tar.gz
IVGCVSW-2092 Port LSTMCell::Eval to ArmNN
* Ported Google's LSTM implementation to RefLstmFloat32Workload
* Fixed the code throughout because of an error in the docs around the scratch buffer size
* Updated IsLstmSupported
* Added the unit tests

!android-nn-driver:127
Change-Id: I5577b7e39ca52df1a7f102a9b437df6aa99520b6
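Editor's note on the sizing rule the patch encodes: when CIFG (coupled input-forget gate) is enabled, the input gate is not computed separately, so the scratch buffer only needs room for 3 gate buffers per unit; with CIFG disabled all 4 gates are buffered. The earlier code had these two cases swapped, which is what the "error in the docs around the scratch buffer size" bullet refers to. Below is a minimal, self-contained C++ sketch of that rule; the helper name ScratchBufferWidth is hypothetical and not part of the ArmNN API, it merely mirrors the expression numUnits * (cifgEnabled ? 3 : 4) used in the patch.

#include <cstdint>
#include <iostream>

// Hypothetical helper (not an ArmNN API) mirroring the patched expression:
// CIFG enabled  -> 3 gate buffers per unit (the input gate is coupled away)
// CIFG disabled -> 4 gate buffers per unit
uint32_t ScratchBufferWidth(uint32_t numUnits, bool cifgEnabled)
{
    return numUnits * (cifgEnabled ? 3u : 4u);
}

int main()
{
    const uint32_t numUnits = 4;
    std::cout << "CIFG on:  " << ScratchBufferWidth(numUnits, true)  << '\n'   // 12
              << "CIFG off: " << ScratchBufferWidth(numUnits, false) << '\n';  // 16
    return 0;
}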
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/layers/LstmLayer.cpp     | 9
-rw-r--r--  src/armnn/test/CreateWorkload.hpp  | 8
-rw-r--r--  src/armnn/test/OptimizerTests.cpp  | 7
3 files changed, 5 insertions(+), 19 deletions(-)
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 866c837357..bd104d49fe 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -123,14 +123,7 @@ std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorSh
unsigned int numUnits = inputShapes[2][1];
std::vector<TensorShape> outShapes;
- if (!m_Param.m_CifgEnabled)
- {
- outShapes.push_back(TensorShape({batchSize, numUnits*3}));
- }
- else
- {
- outShapes.push_back(TensorShape({batchSize, numUnits*4}));
- }
+ outShapes.push_back(TensorShape({batchSize, numUnits * (m_Param.m_CifgEnabled ? 3 : 4)}));
outShapes.push_back(TensorShape({batchSize, outputSize}));
outShapes.push_back(TensorShape({batchSize, numUnits}));
outShapes.push_back(TensorShape({batchSize, outputSize}));
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 07f9079b5d..111df4b328 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -321,12 +321,8 @@ std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& fa
armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
- armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits*3 }, DataType::Float32);
- if (layerDesc.m_CifgEnabled)
- {
- lstmTensorInfoScratchBuff.SetShape({ batchSize, numUnits*4 });
- }
-
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
+ DataType::Float32);
Connect(input, layer, lstmTensorInfo1, 0, 0);
Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 8bd7d3dbee..30ca52092a 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -154,11 +154,8 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
- armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits*3 }, DataType::Float32);
- if (layerDesc.m_CifgEnabled)
- {
- lstmTensorInfoScratchBuff.SetShape({ batchSize, numUnits*4 });
- }
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
+ DataType::Float32);
Connect(input, layer, lstmTensorInfo1, 0, 0);
Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);