about | summary | refs | log | tree | commit | diff
path: root/src/armnn/layers/QuantizedLstmLayer.cpp
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-01 16:51:23 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-06 09:06:01 +0100
commitac2770a4bb6461bfbddec928bb6208f26f898f02 (patch)
treec72f67f648b7aca2f4bccf69b05d185bf5f9ccad /src/armnn/layers/QuantizedLstmLayer.cpp
parent7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff)
downloadarmnn-ac2770a4bb6461bfbddec928bb6208f26f898f02.tar.gz
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
Diffstat (limited to 'src/armnn/layers/QuantizedLstmLayer.cpp')
-rw-r--r-- src/armnn/layers/QuantizedLstmLayer.cpp | 28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 8717041a53..b56ae3ff52 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -78,7 +78,7 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int numBatches = inputShapes[0][0];
@@ -102,34 +102,34 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousOutputIn
});
- BOOST_ASSERT(inferredShapes.size() == 2);
+ ARMNN_ASSERT(inferredShapes.size() == 2);
// Check weights and bias for nullptr
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
// Check output TensorShape(s) match inferred shape