author     James Conroy <james.conroy@arm.com>    2020-03-20 08:49:33 +0000
committer  James Conroy <james.conroy@arm.com>    2020-03-20 14:53:44 +0000
commit     586a9aac99312eb9cb304cbbd18cec46b9158e23 (patch)
tree       6d620eae6dcfb920ac04eae43424548dc602a1eb /src/armnn/Network.cpp
parent     c94d3f7107b84b586791aa096f8641e6efa18c90 (diff)
download   armnn-586a9aac99312eb9cb304cbbd18cec46b9158e23.tar.gz
IVGCVSW-4549 Add front end for new QLSTM layer
* Added new layer QLstm (Android R HAL 1.3)
* Made necessary updates to APIs
* Added unit tests
* This layer is functionally equivalent to the original unquantized LSTM layer, with some additional quantization features added. Due to this, the original LstmParams are used for this layer.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I5b7f2d2fb6e17e81573b41a31bc55f49ae79608f
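For orientation, here is a minimal sketch of how a client might drive the new front end through INetwork::AddQLstmLayer. The descriptor and parameter field names match this commit's API; the tensor shape, data type, scale and zero point are illustrative assumptions, not values taken from the commit.

    // Hypothetical usage sketch of the new QLstm front end; shapes and
    // quantization values are invented for illustration.
    #include <armnn/ArmNN.hpp>

    #include <cstdint>
    #include <vector>

    void BuildQLstmExample()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        armnn::QLstmDescriptor descriptor;
        descriptor.m_CifgEnabled       = true;   // skip the optional CIFG tensors
        descriptor.m_PeepholeEnabled   = false;
        descriptor.m_ProjectionEnabled = false;
        descriptor.m_LayerNormEnabled  = false;

        // One representative quantized weight tensor (QSymmS8 is assumed here).
        const unsigned int numUnits  = 4;
        const unsigned int inputSize = 2;
        armnn::TensorInfo weightsInfo({numUnits, inputSize},
                                      armnn::DataType::QSymmS8,
                                      0.1f /* scale */, 0 /* zero point */);
        std::vector<int8_t> weightsData(numUnits * inputSize, 0);
        armnn::ConstTensor inputToForgetWeights(weightsInfo, weightsData);

        armnn::LstmInputParams params;
        params.m_InputToForgetWeights = &inputToForgetWeights;
        // The remaining basic tensors (m_InputToCellWeights,
        // m_InputToOutputWeights, the three recurrent weights and the three
        // gate biases) must be set the same way: AddQLstmLayer dereferences
        // them unconditionally, as the diff below shows.

        armnn::IConnectableLayer* const qLstm =
            network->AddQLstmLayer(descriptor, params, "qLstm");
        (void)qLstm; // inputs and outputs would be connected here
    }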
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--    src/armnn/Network.cpp    141
1 file changed, 141 insertions, 0 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9eef7b2fb6..7a6fa8f78c 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1670,6 +1670,147 @@ IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams
return layer;
}
+IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor& descriptor,
+ const LstmInputParams& params,
+ const char* name)
+{
+ const auto layer = m_Graph->AddLayer<QLstmLayer>(descriptor, name);
+
+ // QLstm Basic Parameters
+ layer->m_BasicParameters.m_InputToForgetWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToForgetWeights));
+ layer->m_BasicParameters.m_InputToCellWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToCellWeights));
+ layer->m_BasicParameters.m_InputToOutputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToOutputWeights));
+ layer->m_BasicParameters.m_RecurrentToForgetWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToForgetWeights));
+ layer->m_BasicParameters.m_RecurrentToCellWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToCellWeights));
+ layer->m_BasicParameters.m_RecurrentToOutputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToOutputWeights));
+ layer->m_BasicParameters.m_ForgetGateBias =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetGateBias));
+ layer->m_BasicParameters.m_CellBias =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellBias));
+ layer->m_BasicParameters.m_OutputGateBias =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputGateBias));
+
+ // QLstm Cifg parameters
+ if(!descriptor.m_CifgEnabled)
+ {
+ if(params.m_InputToInputWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Input To Input Weights cannot be NULL");
+ }
+
+ if(params.m_RecurrentToInputWeights == nullptr)
+ {
+ throw InvalidArgumentException(
+ "AddQLstmLayer: Recurrent To Input Weights cannot be NULL");
+ }
+
+ if(params.m_InputGateBias == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Input Gate Bias cannot be NULL");
+ }
+
+ layer->m_CifgParameters.m_InputToInputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputToInputWeights));
+ layer->m_CifgParameters.m_RecurrentToInputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_RecurrentToInputWeights));
+ layer->m_CifgParameters.m_InputGateBias =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputGateBias));
+ }
+
+ // QLstm Projection parameters
+ if(descriptor.m_ProjectionEnabled)
+ {
+ if(params.m_ProjectionWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Projection Weights cannot be NULL");
+ }
+
+ if(params.m_ProjectionBias == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Projection Biases cannot be NULL");
+ }
+
+ layer->m_ProjectionParameters.m_ProjectionWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionWeights));
+ layer->m_ProjectionParameters.m_ProjectionBias =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_ProjectionBias));
+ }
+
+ // QLstm Peephole params
+ if(descriptor.m_PeepholeEnabled)
+ {
+ if(params.m_CellToForgetWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Cell To Forget Weights cannot be NULL");
+ }
+
+ if(params.m_CellToOutputWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Cell To Output Weights cannot be NULL");
+ }
+
+ if(!descriptor.m_CifgEnabled)
+ {
+ if(params.m_CellToInputWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Cell To Input Weights cannot be NULL");
+ }
+
+ layer->m_PeepholeParameters.m_CellToInputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToInputWeights));
+ }
+
+ layer->m_PeepholeParameters.m_CellToForgetWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToForgetWeights));
+ layer->m_PeepholeParameters.m_CellToOutputWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellToOutputWeights));
+ }
+
+ // QLstm Layer Normalization params
+ if(descriptor.m_LayerNormEnabled)
+ {
+ if(params.m_ForgetLayerNormWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Forget layer normalization weights cannot be NULL");
+ }
+
+ if(params.m_CellLayerNormWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Cell layer normalization weights cannot be NULL");
+ }
+
+ if(params.m_OutputLayerNormWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Output layer normalization weights cannot be NULL");
+ }
+
+ if(!descriptor.m_CifgEnabled)
+ {
+ if(params.m_InputLayerNormWeights == nullptr)
+ {
+ throw InvalidArgumentException("AddQLstmLayer: Input layer normalization weights cannot be NULL");
+ }
+
+ layer->m_LayerNormParameters.m_InputLayerNormWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_InputLayerNormWeights));
+ }
+
+ layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_ForgetLayerNormWeights));
+ layer->m_LayerNormParameters.m_CellLayerNormWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_CellLayerNormWeights));
+ layer->m_LayerNormParameters.m_OutputLayerNormWeights =
+ std::make_unique<ScopedCpuTensorHandle>(*(params.m_OutputLayerNormWeights));
+ }
+ return layer;
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
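The checks added in this commit turn a missing optional tensor into a catchable error rather than a null dereference. Below is a hedged sketch of how a caller might observe this; it assumes basicParams already holds the nine mandatory weight/bias tensors, which AddQLstmLayer dereferences without null checks.

    // Hypothetical error-handling sketch, not part of this commit. Assumes
    // "basicParams" already carries the nine mandatory weight/bias tensors.
    #include <armnn/ArmNN.hpp>

    void ProjectionWithoutTensors(armnn::INetwork& network,
                                  const armnn::LstmInputParams& basicParams)
    {
        armnn::QLstmDescriptor descriptor;
        descriptor.m_CifgEnabled       = true;  // skip the CIFG checks
        descriptor.m_ProjectionEnabled = true;  // ...but supply no projection tensors

        try
        {
            network.AddQLstmLayer(descriptor, basicParams, "badQLstm");
        }
        catch (const armnn::InvalidArgumentException&)
        {
            // Thrown with "AddQLstmLayer: Projection Weights cannot be NULL",
            // matching the projection check in the diff above.
        }
    }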