about summary refs log tree commit diff
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-06 16:46:21 +0100
committerNarumol Prangnawarat <narumol.prangnawarat@arm.com>2020-04-07 14:55:36 +0000
commit4d07e5e0e2f32184e395f44cc50eedf3de284d22 (patch)
tree880d7a30c3d36f1892312da7ed6d04d7dbf2f040
parentdbae2476905291b82e4b9bb7828be83ae7ddd84f (diff)
downloadandroid-nn-driver-4d07e5e0e2f32184e395f44cc50eedf3de284d22.tar.gz
IVGCVSW-4485 Remove Boost assert
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: If602024a339df7548333e470545f9400c3daf7b3
-rw-r--r--ConversionUtils.cpp4
-rw-r--r--ConversionUtils.hpp24
-rw-r--r--RequestThread.cpp4
-rw-r--r--Utils.cpp7
-rw-r--r--test/1.2/Capabilities.cpp10
-rw-r--r--test/TestTensor.cpp2
-rw-r--r--test/TestTensor.hpp4
7 files changed, 30 insertions, 25 deletions
diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp
index 09e51598..4c773964 100644
--- a/ConversionUtils.cpp
+++ b/ConversionUtils.cpp
@@ -31,7 +31,7 @@ bool LayerInputHandle::IsValid() const
void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
- BOOST_ASSERT(IsValid());
+ ARMNN_ASSERT(IsValid());
if (m_OutputSlot)
{
m_OutputSlot->Connect(inputSlot);
@@ -103,7 +103,7 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
armnn::IConnectableLayer* prevLayer,
ConversionData& data)
{
- BOOST_ASSERT(prevLayer->GetNumOutputSlots() == 1);
+ ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 8067e53b..3b01b40f 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -10,6 +10,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
@@ -21,7 +22,6 @@
#include <CpuExecutor.h>
#include <OperationsUtils.h>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -269,7 +269,7 @@ armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
- BOOST_ASSERT(reshapeLayer != nullptr);
+ ARMNN_ASSERT(reshapeLayer != nullptr);
// Attach the input layer to the reshape layer
inputLayer.Connect(reshapeLayer->GetInputSlot(0));
@@ -283,7 +283,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
armnn::IConnectableLayer* startLayer,
ConversionData& data)
{
- BOOST_ASSERT(startLayer != nullptr);
+ ARMNN_ASSERT(startLayer != nullptr);
const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
@@ -338,7 +338,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
return false;
}
- BOOST_ASSERT(data.m_Network != nullptr);
+ ARMNN_ASSERT(data.m_Network != nullptr);
armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
if (input0IsSmaller)
@@ -498,7 +498,7 @@ armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& inp
// Add swizzle layer
armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// Connect input to swizzle layer
input.Connect(layer->GetInputSlot(0));
@@ -619,7 +619,7 @@ bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
bool needPermute = false;
- BOOST_ASSERT(numberOfDimensions >= 3);
+ ARMNN_ASSERT(numberOfDimensions >= 3);
// ArmNN uses Compute Library subtensors to perform concatenation
// This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
@@ -685,7 +685,7 @@ const HalOperand* GetInputOperand(const HalOperation& operation,
}
// Model should have been validated beforehand
- BOOST_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
+ ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
@@ -704,7 +704,7 @@ const HalOperand* GetOutputOperand(const HalOperation& operation,
}
// Model should have been validated beforehand
- BOOST_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
+ ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
@@ -1453,7 +1453,7 @@ bool ConvertToActivation(const HalOperation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
@@ -1950,7 +1950,7 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
}
}
- BOOST_ASSERT(inputShapes.size() == inputHandles.size());
+ ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
if (inputsHaveBeenReshaped)
{
@@ -2677,7 +2677,7 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
}
const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
- BOOST_ASSERT(operand);
+ ARMNN_ASSERT(operand);
if (!IsQSymm8(*operand))
{
@@ -2701,7 +2701,7 @@ DequantizeResult DequantizeIfRequired(size_t operand_index,
for (size_t i = 0; i < dequantizedBufferLength; ++i)
{
float* dstPtr = dequantizedBuffer.get();
- BOOST_ASSERT(dstPtr);
+ ARMNN_ASSERT(dstPtr);
*dstPtr++ = quantizedBuffer[i] * quantizationScale;
}
diff --git a/RequestThread.cpp b/RequestThread.cpp
index 50c5161c..a177b1a4 100644
--- a/RequestThread.cpp
+++ b/RequestThread.cpp
@@ -17,7 +17,7 @@
#include "ArmnnPreparedModel_1_3.hpp"
#endif
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <log/log.h>
@@ -135,7 +135,7 @@ void RequestThread<PreparedModel, HalVersion, CallbackContext>::Process()
default:
// this should be unreachable
ALOGE("RequestThread::Process() - invalid message type");
- BOOST_ASSERT_MSG(false, "ArmNN: RequestThread: invalid message type");
+ ARMNN_ASSERT_MSG(false, "ArmNN: RequestThread: invalid message type");
}
}
}
diff --git a/Utils.cpp b/Utils.cpp
index 00d61c7b..a7df499c 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -11,6 +11,7 @@
#include <armnnUtils/Permute.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cassert>
#include <cerrno>
@@ -146,7 +147,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
if (perChannel)
{
// ExtraParams is expected to be of type channelQuant
- BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
+ ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
auto perAxisQuantParams = operand.extraParams.channelQuant();
@@ -210,7 +211,7 @@ armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
if (perChannel)
{
// ExtraParams is expected to be of type channelQuant
- BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
+ ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
auto perAxisQuantParams = operand.extraParams.channelQuant();
@@ -415,7 +416,7 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
return;
}
- BOOST_ASSERT(profiler);
+ ARMNN_ASSERT(profiler);
// Set the name of the output profiling file.
const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index 5f817591..f25723de 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -7,6 +7,8 @@
#include "Utils.h"
+#include <armnn/utility/Assert.hpp>
+
#include <boost/test/unit_test.hpp>
#include <sys/system_properties.h>
@@ -60,8 +62,8 @@ void CheckOperandType(const V1_2::Capabilities& capabilities, V1_2::OperandType
{
using namespace armnn_driver::hal_1_2;
PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
- BOOST_ASSERT(perfInfo.execTime == execTime);
- BOOST_ASSERT(perfInfo.powerUsage == powerUsage);
+ ARMNN_ASSERT(perfInfo.execTime == execTime);
+ ARMNN_ASSERT(perfInfo.powerUsage == powerUsage);
}
BOOST_FIXTURE_TEST_SUITE(CapabilitiesTests, CapabilitiesFixture)
@@ -92,7 +94,7 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
+ ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
};
__system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
@@ -153,7 +155,7 @@ BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
- BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
+ ARMNN_ASSERT(error == V1_0::ErrorStatus::NONE);
};
armnn::IRuntime::CreationOptions options;
diff --git a/test/TestTensor.cpp b/test/TestTensor.cpp
index a8045c5b..e6cb446f 100644
--- a/test/TestTensor.cpp
+++ b/test/TestTensor.cpp
@@ -25,7 +25,7 @@ unsigned int TestTensor::GetNumElements() const
const float * TestTensor::GetData() const
{
- BOOST_ASSERT(m_Data.empty() == false);
+ ARMNN_ASSERT(m_Data.empty() == false);
return &m_Data[0];
}
diff --git a/test/TestTensor.hpp b/test/TestTensor.hpp
index 623c9fbd..1cd1950d 100644
--- a/test/TestTensor.hpp
+++ b/test/TestTensor.hpp
@@ -6,6 +6,8 @@
#include "../ArmnnDriver.hpp"
+#include <armnn/utility/Assert.hpp>
+
namespace driverTestHelpers
{
@@ -17,7 +19,7 @@ public:
: m_Shape{shape}
, m_Data{data}
{
- BOOST_ASSERT(m_Shape.GetNumElements() == m_Data.size());
+ ARMNN_ASSERT(m_Shape.GetNumElements() == m_Data.size());
}
hidl_vec<uint32_t> GetDimensions() const;