From ac2770a4bb6461bfbddec928bb6208f26f898f02 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Wed, 1 Apr 2020 16:51:23 +0100
Subject: IVGCVSW-4485 Remove Boost assert

 * Change boost assert to armnn assert
 * Change include file to armnn assert
 * Fix ARMNN_ASSERT_MSG issue with multiple conditions
 * Change BOOST_ASSERT to BOOST_TEST where appropriate
 * Remove unused include statements

Signed-off-by: Narumol Prangnawarat
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
---
 src/armnn/test/OptimizerTests.cpp | 16 ++++++++--------
 src/armnn/test/QuantizerTest.cpp  |  2 +-
 src/armnn/test/TensorHelpers.hpp  |  4 ++--
 src/armnn/test/TestUtils.cpp      |  6 +++---
 4 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'src/armnn/test')

diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index a7b23dbd86..c7883ffdb8 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -203,8 +203,8 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
     {
         if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
         }
     }
 
@@ -223,18 +223,18 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
     {
         if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
         }
         else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
         }
         else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
         }
     }
 
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index ef9b2da782..ebdfbc5a40 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -336,7 +336,7 @@ TensorInfo GetInputTensorInfo(const Network* network)
 {
     for (auto&& inputLayer : network->GetGraph().GetInputLayers())
     {
-        BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
         return inputLayer->GetOutputSlot(0).GetTensorInfo();
     }
     throw InvalidArgumentException("Network has no input layers");
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 3f8589353c..ca148edefb 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -5,10 +5,10 @@
 #pragma once
 
 #include
+#include <armnn/utility/Assert.hpp>
 
 #include
-#include <boost/assert.hpp>
 #include
 #include
 #include
@@ -192,7 +192,7 @@ boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo)
 
 template <typename T, std::size_t n>
 boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat)
 {
-    BOOST_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
+    ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
 
     std::array<unsigned int, n> shape;
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
index 8ef820b3d5..6d7d02dcff 100644
--- a/src/armnn/test/TestUtils.cpp
+++ b/src/armnn/test/TestUtils.cpp
@@ -5,15 +5,15 @@
 #include "TestUtils.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnn;
 
 void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, unsigned int fromIndex, unsigned int toIndex)
 {
-    BOOST_ASSERT(from);
-    BOOST_ASSERT(to);
+    ARMNN_ASSERT(from);
+    ARMNN_ASSERT(to);
 
     from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
     from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
-- 
cgit v1.2.1
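
Note on the replacements above: ARMNN_ASSERT/ARMNN_ASSERT_MSG are Arm NN's own assertion macros (pulled in via the armnn/utility/Assert.hpp include added in these hunks), used for library-internal invariants, while BOOST_TEST is the Boost.Test check macro that records a test failure and lets the remaining checks run, which is why the commit message distinguishes the two substitutions. The following stand-alone C++ sketch is not part of the patch; it only illustrates the two migration patterns, and names such as CheckLayerInvariant and AssertMigrationSketch are hypothetical.

    // Minimal, hypothetical sketch of the two substitutions described in the
    // commit message; not taken from the patched files.
    #define BOOST_TEST_MODULE AssertMigrationSketch
    #include <boost/test/included/unit_test.hpp>   // header-only Boost.Test driver

    #include <armnn/utility/Assert.hpp>            // assumed provider of ARMNN_ASSERT/ARMNN_ASSERT_MSG

    // Library-internal invariant: previously BOOST_ASSERT(numOutputSlots == 1);
    void CheckLayerInvariant(unsigned int numOutputSlots)
    {
        ARMNN_ASSERT_MSG(numOutputSlots == 1, "Layer should have exactly 1 output slot");
    }

    BOOST_AUTO_TEST_CASE(HypotheticalMigrationExample)
    {
        CheckLayerInvariant(1);

        // Test expectation: previously BOOST_ASSERT(condition); BOOST_TEST reports
        // a failure through the test framework instead of aborting the process.
        const bool condition = true;
        BOOST_TEST(condition);
    }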