aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/optimizations
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-01 16:51:23 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-06 09:06:01 +0100
commitac2770a4bb6461bfbddec928bb6208f26f898f02 (patch)
treec72f67f648b7aca2f4bccf69b05d185bf5f9ccad /src/armnn/optimizations
parent7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff)
downloadarmnn-ac2770a4bb6461bfbddec928bb6208f26f898f02.tar.gz
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
Diffstat (limited to 'src/armnn/optimizations')
-rw-r--r--src/armnn/optimizations/FoldPadIntoConvolution2d.hpp8
-rw-r--r--src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp4
-rw-r--r--src/armnn/optimizations/OptimizeInverseConversions.hpp2
-rw-r--r--src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp4
4 files changed, 9 insertions, 9 deletions
diff --git a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
index b2a2ba43ed..e598deb977 100644
--- a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
+++ b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
@@ -21,8 +21,8 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Pad);
- BOOST_ASSERT(child.GetType() == LayerType::Convolution2d);
+ ARMNN_ASSERT(base.GetType() == LayerType::Pad);
+ ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d);
PadLayer* padLayer = boost::polymorphic_downcast<PadLayer*>(&base);
Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast<Convolution2dLayer*>(&child);
@@ -60,12 +60,12 @@ public:
newConv2dLayer.GetOutputHandler().SetTensorInfo(outInfo);
// Copy weights and bias to the new convolution layer
- BOOST_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
"FoldPadIntoConvolution2d: Weights data should not be null.");
newConv2dLayer.m_Weight = std::move(convolution2dLayer->m_Weight);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
"FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
newConv2dLayer.m_Bias = std::move(convolution2dLayer->m_Bias);
}
diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
index 53d4a3c4fd..39bfe6e936 100644
--- a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
+++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
@@ -21,8 +21,8 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Reshape);
- BOOST_ASSERT(child.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(base.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(child.GetType() == LayerType::Reshape);
OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index 3ea4a5b279..d479445ce3 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -24,7 +24,7 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
+ ARMNN_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
child.GetType() == LayerType::ConvertFp32ToFp16) ||
(base.GetType() == LayerType::ConvertFp32ToFp16 &&
child.GetType() == LayerType::ConvertFp16ToFp32));
diff --git a/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp b/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
index 21aed869f5..ea4de9df6f 100644
--- a/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
+++ b/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
@@ -22,7 +22,7 @@ public:
{
// Validate base layer (the Permute) is compatible
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
+ ARMNN_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
const TensorInfo& inputInfo = base.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& intermediateInfo = base.GetOutputSlot(0).GetTensorInfo();
if (intermediateInfo.GetNumDimensions() != 4)
@@ -39,7 +39,7 @@ public:
// Validate child layer (the BatchToSpace) is compatible
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
+ ARMNN_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
const TensorInfo& outputInfo = child.GetOutputSlot(0).GetTensorInfo();
const BatchToSpaceNdDescriptor& batchToSpaceDesc = static_cast<BatchToSpaceNdLayer&>(child).GetParameters();
if (batchToSpaceDesc.m_DataLayout != DataLayout::NHWC)