author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2020-04-01 16:51:23 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2020-04-06 09:06:01 +0100
commit     ac2770a4bb6461bfbddec928bb6208f26f898f02 (patch)
tree       c72f67f648b7aca2f4bccf69b05d185bf5f9ccad /src/backends
parent     7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff)
download   armnn-ac2770a4bb6461bfbddec928bb6208f26f898f02.tar.gz
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
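For context, the new macros come from armnn/utility/Assert.hpp, a header that is not part of this diff. A minimal sketch of a drop-in replacement for boost/assert.hpp, assuming the usual cassert-based implementation (the macro names match the diff; the bodies are an assumption):

    // Hypothetical sketch of <armnn/utility/Assert.hpp>; the real header is not shown in this diff.
    #pragma once

    #include <cassert>

    // Both macros forward to assert(), so they compile away under NDEBUG,
    // matching the release-build behaviour of BOOST_ASSERT/BOOST_ASSERT_MSG.
    #define ARMNN_ASSERT(COND) assert(COND)

    // ANDing with the string literal keeps the message visible in the
    // diagnostic printed when the condition fails; the literal itself is
    // always truthy, so it never changes the result.
    #define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && (MSG))

Note that the preprocessor splits macro arguments on top-level commas, so a condition such as std::is_same<A, B>::value reads as two arguments to ARMNN_ASSERT_MSG; callers work around this by wrapping the condition in an extra pair of parentheses. This is plausibly the "multiple conditions" issue mentioned above, though the hunk that fixes it is outside src/backends and so not shown here.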
Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp  2
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp  5
-rw-r--r--  src/backends/aclCommon/BaseMemoryManager.cpp  14
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.cpp  4
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.hpp  10
-rw-r--r--  src/backends/backendsCommon/LayerSupportRules.hpp  4
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp  2
-rw-r--r--  src/backends/backendsCommon/Workload.hpp  18
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp  2
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp  16
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp  8
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/MockBackend.cpp  10
-rw-r--r--  src/backends/backendsCommon/test/WorkloadTestUtils.hpp  2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp  10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp  18
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp  20
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp  3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp  2
-rw-r--r--  src/backends/cl/ClBackendContext.cpp  3
-rw-r--r--  src/backends/cl/ClContextControl.cpp  6
-rw-r--r--  src/backends/cl/workloads/ClConstantWorkload.cpp  4
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp  2
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp  6
-rw-r--r--  src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp  2
-rw-r--r--  src/backends/cl/workloads/ClWorkloadUtils.hpp  4
-rw-r--r--  src/backends/neon/NeonInterceptorScheduler.cpp  2
-rw-r--r--  src/backends/neon/NeonTensorHandle.hpp  4
-rw-r--r--  src/backends/neon/NeonTimer.cpp  5
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp  4
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp  4
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp  6
-rw-r--r--  src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp  4
-rw-r--r--  src/backends/neon/workloads/NeonWorkloadUtils.hpp  4
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp  4
-rw-r--r--  src/backends/reference/RefMemoryManager.cpp  12
-rw-r--r--  src/backends/reference/RefTensorHandle.cpp  10
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp  25
-rw-r--r--  src/backends/reference/workloads/BatchToSpaceNd.cpp  10
-rw-r--r--  src/backends/reference/workloads/Concatenate.cpp  2
-rw-r--r--  src/backends/reference/workloads/ConvImpl.cpp  12
-rw-r--r--  src/backends/reference/workloads/ConvImpl.hpp  1
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp  4
-rw-r--r--  src/backends/reference/workloads/DepthToSpace.cpp  4
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp  2
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp  6
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp  6
-rw-r--r--  src/backends/reference/workloads/FullyConnected.cpp  2
-rw-r--r--  src/backends/reference/workloads/Gather.cpp  4
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp  4
-rw-r--r--  src/backends/reference/workloads/Mean.cpp  2
-rw-r--r--  src/backends/reference/workloads/RefConstantWorkload.cpp  6
-rw-r--r--  src/backends/reference/workloads/RefFullyConnectedWorkload.cpp  2
-rw-r--r--  src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp  6
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.cpp  2
-rw-r--r--  src/backends/reference/workloads/RefStridedSliceWorkload.cpp  2
-rw-r--r--  src/backends/reference/workloads/Slice.cpp  16
-rw-r--r--  src/backends/reference/workloads/Softmax.cpp  4
-rw-r--r--  src/backends/reference/workloads/Splitter.cpp  5
-rw-r--r--  src/backends/reference/workloads/Splitter.hpp  8
-rw-r--r--  src/backends/reference/workloads/StridedSlice.cpp  7
-rw-r--r--  src/backends/reference/workloads/TensorBufferArrayView.hpp  4
63 files changed, 193 insertions(+), 197 deletions(-)
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index f5a9e05de9..7a75f9c872 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -42,7 +42,7 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
case armnn::DataType::Signed32:
return arm_compute::DataType::S32;
default:
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
return arm_compute::DataType::UNKNOWN;
}
}
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9c6f46462e..80bb7623e8 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -6,11 +6,10 @@
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/Types.h>
-#include <boost/assert.hpp>
-
namespace armnn
{
@@ -161,7 +160,7 @@ inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc,
unsigned int dim = tensor.GetNumDimensions();
- BOOST_ASSERT(dim != 0);
+ ARMNN_ASSERT(dim != 0);
// Currently ArmNN support axis 1.
return dim - 1;
diff --git a/src/backends/aclCommon/BaseMemoryManager.cpp b/src/backends/aclCommon/BaseMemoryManager.cpp
index 844fbcd4ca..b43eaf8da3 100644
--- a/src/backends/aclCommon/BaseMemoryManager.cpp
+++ b/src/backends/aclCommon/BaseMemoryManager.cpp
@@ -19,7 +19,7 @@ namespace armnn
BaseMemoryManager::BaseMemoryManager(std::unique_ptr<arm_compute::IAllocator> alloc,
MemoryAffinity memoryAffinity)
{
- BOOST_ASSERT(alloc);
+ ARMNN_ASSERT(alloc);
m_Allocator = std::move(alloc);
m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
@@ -51,30 +51,30 @@ void BaseMemoryManager::Acquire()
static const size_t s_NumPools = 1;
// Allocate memory pools for intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Allocate memory pools for inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->acquire();
}
void BaseMemoryManager::Release()
{
// Release inter-layer memory group. NOTE: This has to come before releasing the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->release();
// Release memory pools managed by intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->clear();
// Release memory pools managed by inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->clear();
}
#else
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index 65e6c47179..7bcf59fdf1 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -118,8 +118,8 @@ void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
{
- BOOST_ASSERT(GetTensor<void>() == nullptr);
- BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
+ ARMNN_ASSERT(GetTensor<void>() == nullptr);
+ ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
if (srcMemory)
{
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index e6e59fcd4f..78efb08f99 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -14,7 +14,7 @@
#include <algorithm>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -30,7 +30,7 @@ public:
template <typename T>
const T* GetConstTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<const T*>(m_Memory);
}
@@ -59,8 +59,8 @@ protected:
private:
// Only used for testing
- void CopyOutTo(void *) const override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
- void CopyInFrom(const void*) override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
@@ -79,7 +79,7 @@ public:
template <typename T>
T* GetTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<T*>(m_MutableMemory);
}
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index 03bec53353..ddecc82172 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
namespace armnn
@@ -30,7 +30,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QAsymmS8:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 8abc8a6ef5..560182286e 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -70,7 +70,7 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
case DataType::QSymmS16:
return nullptr;
default:
- BOOST_ASSERT_MSG(false, "Unknown DataType.");
+ ARMNN_ASSERT_MSG(false, "Unknown DataType.");
return nullptr;
}
}
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index 984443b79b..244b5f1249 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -65,9 +65,9 @@ public:
if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedInputType;
@@ -84,14 +84,14 @@ public:
{
if (expectedOutputType != expectedInputType)
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
}
else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedOutputType;
@@ -109,14 +109,14 @@ public:
MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<QueueDescriptor>(descriptor, info)
{
- BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == InputDataType;
}),
"Trying to create workload with incorrect type");
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == OutputDataType;
@@ -136,11 +136,11 @@ public:
{
if (!info.m_InputTensorInfos.empty())
{
- BOOST_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
+ ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
"Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == DataType;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index f968ad78f7..1f4a849ee9 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -40,7 +40,7 @@ DataType GetBiasDataType(DataType inputDataType)
case DataType::QSymmS16:
return DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "Invalid input data type");
+ ARMNN_ASSERT_MSG(false, "Invalid input data type");
return DataType::Float32;
}
}
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5628c36884..a7e8576668 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -194,7 +194,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
@@ -244,7 +244,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
@@ -335,7 +335,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
TensorInfo biasInfo;
const TensorInfo * biasInfoPtr = nullptr;
@@ -347,7 +347,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
biasInfoPtr = &biasInfo;
}
@@ -381,7 +381,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
default:
{
- BOOST_ASSERT_MSG(false, "Unexpected bias type");
+ ARMNN_ASSERT_MSG(false, "Unexpected bias type");
}
}
}
@@ -1156,12 +1156,12 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
GetBiasTypeFromWeightsType(dataType));
}
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
result = layerSupportObject->IsTransposeConvolution2dSupported(input,
@@ -1175,7 +1175,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
default:
{
- BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
+ ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
reason.value() = "Unrecognised layer type";
result = false;
break;
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 3b3959ba9f..bd5e81e678 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -13,8 +13,8 @@ namespace armnn
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
const PermutationVector& permutationVector, void* permuteBuffer)
{
- BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
TensorInfo tensorInfo = tensor->GetTensorInfo();
@@ -133,8 +133,8 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
DataLayout dataLayout,
void* permuteBuffer)
{
- BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
auto multiplier = weightTensor->GetTensorInfo().GetShape()[0];
auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 66056db4ca..a4da924725 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -168,8 +168,8 @@ void CopyTensorContentsGeneric(const ITensorHandle* srcTensor, ITensorHandle* ds
auto dstPtrChannel = dstData;
for (unsigned int w = 0; w < copyWidth; ++w)
{
- BOOST_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
- BOOST_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
+ ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
+ ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
copy(dstData, srcData, copyLength);
dstData += dstWidthStride;
srcData += srcWidthStride;
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index 116bf77c63..abdaa8131b 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -23,7 +23,7 @@ namespace
bool IsLayerSupported(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::LayerType layerType = layer->GetType();
switch (layerType)
@@ -47,7 +47,7 @@ bool IsLayerSupported(const armnn::Layer& layer)
bool IsLayerOptimizable(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// A Layer is not optimizable if its name contains "unoptimizable"
const std::string layerName(layer->GetName());
@@ -191,7 +191,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
supportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
{
- BOOST_ASSERT(supportedSubgraph != nullptr);
+ ARMNN_ASSERT(supportedSubgraph != nullptr);
PreCompiledLayer* preCompiledLayer =
optimizationViews.GetGraph().AddLayer<PreCompiledLayer>(
@@ -228,7 +228,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
unsupportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
{
- BOOST_ASSERT(unsupportedSubgraph != nullptr);
+ ARMNN_ASSERT(unsupportedSubgraph != nullptr);
optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
});
@@ -256,7 +256,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouchedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
{
- BOOST_ASSERT(untouchedSubgraph != nullptr);
+ ARMNN_ASSERT(untouchedSubgraph != nullptr);
optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
});
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index df001b7530..9f38e47715 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -106,7 +106,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 319434e093..a82048cd81 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1212,9 +1212,9 @@ LayerTestResult<T,4> CompareActivationTestImpl(
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
- BOOST_ASSERT(workload != nullptr);
+ ARMNN_ASSERT(workload != nullptr);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
- BOOST_ASSERT(workloadRef != nullptr);
+ ARMNN_ASSERT(workloadRef != nullptr);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 2156b0ee9e..a6b703b08b 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -5,7 +5,7 @@
#include "ComparisonTestImpl.hpp"
-
+#include <armnn/utility/Assert.hpp>
#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
@@ -18,8 +18,6 @@
#include <test/TensorHelpers.hpp>
-#include <boost/assert.hpp>
-
namespace
{
@@ -44,13 +42,13 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
int outQuantOffset)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
- BOOST_ASSERT(shape1.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
- BOOST_ASSERT(outShape.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 1e40b42dcf..9e08e30dec 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -61,7 +61,7 @@ bool NeedPermuteForConcat(
}
else
{
- BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
"Input shapes must have the same number of dimensions");
}
}
@@ -92,7 +92,7 @@ void Generate3dPermuteVectorForConcat(
unsigned int & concatDim,
std::pair<PermutationVector, PermutationVector> & permutations)
{
- BOOST_ASSERT_MSG(numDimensions <= 3,
+ ARMNN_ASSERT_MSG(numDimensions <= 3,
"Only dimensions 1,2 and 3 are supported by this helper");
unsigned int expandedBy = 3 - numDimensions;
unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@ void Generate3dPermuteVectorForConcat(
}
else
{
- BOOST_ASSERT(expandedConcatAxis == 0);
+ ARMNN_ASSERT(expandedConcatAxis == 0);
concatDim = 0;
}
}
@@ -127,7 +127,7 @@ template<typename T> void PermuteTensorData(
std::vector<T>& outputData)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+ ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
// Nullptr is an error in the test. By returning without doing the concatenation
@@ -179,7 +179,7 @@ template<typename T> void PermuteInputsForConcat(
TensorInfo & outputTensorInfo)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+ ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
unsigned int numDims = 0;
@@ -200,12 +200,12 @@ template<typename T> void PermuteInputsForConcat(
// Store the reverese permutation.
permuteVector = permutations.second;
- BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+ ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
"Test logic error, we don't need permutation, so we shouldn't arrive here");
}
else
{
- BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
"All inputs must have the same number of dimensions");
}
@@ -244,7 +244,7 @@ template <typename T> void PermuteOutputForConcat(
std::unique_ptr<ITensorHandle> && inputDataHandle,
T * data)
{
- BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+ ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
if (data == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
@@ -279,7 +279,7 @@ template<typename T> void Concatenate(
unsigned int concatDim,
bool useSubtensor)
{
- BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+ ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
if (output == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 50ad667dde..c66027efdf 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -169,9 +169,9 @@ template<typename T, typename B>
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
"Invalid type and parameter combination.");
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
"Invalid type and parameter combination.");
// Note we need to dequantize and re-quantize the image value and the bias.
@@ -183,7 +183,7 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
for (uint32_t x = 0; x < w; ++x)
{
uint32_t offset = (i * h + y) * w + x;
- BOOST_ASSERT(offset < v.size());
+ ARMNN_ASSERT(offset < v.size());
T& outRef = v[offset];
float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -236,11 +236,11 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
@@ -1627,7 +1627,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
@@ -2135,11 +2135,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
index c277d2d5e1..c64fc88024 100644
--- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
@@ -6,6 +6,7 @@
#pragma once
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/multi_array.hpp>
@@ -14,7 +15,7 @@
template <std::size_t n>
boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
{
- BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+ ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
"Attempting to construct a shape array of mismatching size");
boost::array<unsigned int, n> shape;
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 772ae2ccc7..953b543acb 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -104,7 +104,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), input.origin());
- BOOST_ASSERT(workload);
+ ARMNN_ASSERT(workload);
ExecuteWorkload(*workload, memoryManager);
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index 068e2958af..f612c3743d 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -7,6 +7,7 @@
#include "ClContextControl.hpp"
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/CL/OpenCL.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
@@ -184,7 +185,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
return TuningLevel::Exhaustive;
default:
{
- BOOST_ASSERT_MSG(false, "Tuning level not recognised.");
+ ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
return TuningLevel::None;
}
}
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index f307133085..dbcccce945 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -9,12 +9,12 @@
#include <LeakChecking.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
@@ -59,11 +59,11 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
// Removes the use of global CL context.
cl::Context::setDefault(cl::Context{});
- BOOST_ASSERT(cl::Context::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
// Removes the use of global CL command queue.
cl::CommandQueue::setDefault(cl::CommandQueue{});
- BOOST_ASSERT(cl::CommandQueue::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
// Always load the OpenCL runtime.
LoadOpenClRuntime();
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index 39ae14eaf3..e928870324 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -33,7 +33,7 @@ void ClConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
@@ -56,7 +56,7 @@ void ClConstantWorkload::Execute() const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index e8af0ee3b7..73ec95ce9f 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 858eab4e00..8704b1276f 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -45,7 +45,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -125,7 +125,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -148,7 +148,7 @@ void ClDepthwiseConvolutionWorkload::FreeUnusedTensors()
void ClDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index 7c0736645b..20b2104c62 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index b4bcc1c017..54e7717b7d 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -90,7 +90,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
switch(handle->GetTensorInfo().GetDataType())
@@ -116,7 +116,7 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
diff --git a/src/backends/neon/NeonInterceptorScheduler.cpp b/src/backends/neon/NeonInterceptorScheduler.cpp
index d8dd01bd6c..745c5fde62 100644
--- a/src/backends/neon/NeonInterceptorScheduler.cpp
+++ b/src/backends/neon/NeonInterceptorScheduler.cpp
@@ -5,8 +5,6 @@
#include "NeonInterceptorScheduler.hpp"
-#include <boost/assert.hpp>
-
namespace armnn{
NeonInterceptorScheduler::NeonInterceptorScheduler(arm_compute::IScheduler &realScheduler)
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index 11d20878d7..fb2c2b5128 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -7,6 +7,8 @@
#include <BFloat16.hpp>
#include <Half.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -61,7 +63,7 @@ public:
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- BOOST_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_ASSERT(m_MemoryGroup != nullptr);
m_MemoryGroup->manage(&m_Tensor);
}
}
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index 219edc9680..1079a0d57c 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -6,9 +6,10 @@
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <memory>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
namespace armnn
@@ -21,7 +22,7 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul
void NeonTimer::Start()
{
m_Kernels.clear();
- BOOST_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 83a2692b6e..b9cb807779 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -39,7 +39,7 @@ void NeonConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::ITensor& output =
boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
@@ -69,7 +69,7 @@ void NeonConstantWorkload::Execute() const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 683decd45c..5d45642eef 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -37,7 +37,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -97,7 +97,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_ConvolutionLayer.reset(convolutionLayer.release());
- BOOST_ASSERT(m_ConvolutionLayer);
+ ARMNN_ASSERT(m_ConvolutionLayer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index e39fe54199..a9a3c75bfd 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -49,7 +49,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -127,7 +127,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -144,7 +144,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index c62f71948c..ffca2076fe 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo&
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -81,7 +81,7 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- BOOST_ASSERT(m_Layer);
+ ARMNN_ASSERT(m_Layer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 3f0fe842aa..c3c9d3dbbc 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -35,7 +35,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
switch(handle->GetTensorInfo().GetDataType())
{
@@ -59,7 +59,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 607c86b112..25d639a38a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -348,7 +348,7 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
"Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference concatenation: input type not supported");
@@ -1864,7 +1864,7 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
"Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference stack: input type not supported");
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 4f15e39ee1..76054e41e1 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -4,7 +4,7 @@
//
#include "RefMemoryManager.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
{
- BOOST_ASSERT(pool);
+ ARMNN_ASSERT(pool);
m_FreePools.push_back(pool);
}
@@ -75,25 +75,25 @@ RefMemoryManager::Pool::~Pool()
void* RefMemoryManager::Pool::GetPointer()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void RefMemoryManager::Pool::Acquire()
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void RefMemoryManager::Pool::Release()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 84a74edc1d..7d86b110a7 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()
void RefTensorHandle::Manage()
{
- BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
@@ -84,7 +84,7 @@ void* RefTensorHandle::GetPointer() const
}
else
{
- BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+ ARMNN_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
return m_MemoryManager->GetPointer(m_Pool);
}
}
@@ -92,14 +92,14 @@ void* RefTensorHandle::GetPointer() const
void RefTensorHandle::CopyOutTo(void* dest) const
{
const void *src = GetPointer();
- BOOST_ASSERT(src);
+ ARMNN_ASSERT(src);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
void RefTensorHandle::CopyInFrom(const void* src)
{
void *dest = GetPointer();
- BOOST_ASSERT(dest);
+ ARMNN_ASSERT(dest);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index f43e8b67a9..be20644ab7 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
@@ -78,28 +77,28 @@ public:
TypedIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
return *this;
}
TypedIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
return *this;
}
TypedIterator& operator-=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= increment;
return *this;
}
TypedIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -107,7 +106,7 @@ public:
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
IgnoreUnused(axisIndex);
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -504,7 +503,7 @@ public:
// This should be called to set index for per-axis Encoder/Decoder
PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = axisIndex;
return *this;
@@ -519,7 +518,7 @@ public:
PerAxisIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -527,7 +526,7 @@ public:
PerAxisIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -535,7 +534,7 @@ public:
PerAxisIterator& operator-=(const unsigned int decrement) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= decrement;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -543,7 +542,7 @@ public:
PerAxisIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 7efdb9b75c..bf7de1b04c 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -42,11 +42,11 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
{
TensorShape inputShape = inputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
TensorShape outputShape = outputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+ ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
const unsigned int inputBatchSize = inputShape[0];
const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
@@ -55,12 +55,12 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
- BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
const unsigned int blockShapeHeight = blockShape[0];
const unsigned int blockShapeWidth = blockShape[1];
- BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
const unsigned int cropsTop = cropsData[0].first;
const unsigned int cropsLeft = cropsData[1].first;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index bb55424c0c..a85e34ee61 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -38,7 +38,7 @@ void Concatenate(const ConcatQueueDescriptor &data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 0c13e3ba0d..9d2f410a25 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -5,7 +5,7 @@
#include "ConvImpl.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cmath>
#include <limits>
@@ -15,7 +15,7 @@ namespace armnn
QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
{
- BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+ ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
if (multiplier == 0.0f)
{
m_Multiplier = 0;
@@ -26,14 +26,14 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
const double q = std::frexp(multiplier, &m_RightShift);
m_RightShift = -m_RightShift;
int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
- BOOST_ASSERT(qFixed <= (1ll << 31));
+ ARMNN_ASSERT(qFixed <= (1ll << 31));
if (qFixed == (1ll << 31))
{
qFixed /= 2;
--m_RightShift;
}
- BOOST_ASSERT(m_RightShift >= 0);
- BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+ ARMNN_ASSERT(m_RightShift >= 0);
+ ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
m_Multiplier = static_cast<int32_t>(qFixed);
}
}
@@ -61,7 +61,7 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int
int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
{
- BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+ ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
int32_t mask = (1 << exponent) - 1;
int32_t remainder = x & mask;
int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
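An aside on the fixed-point code these ConvImpl.cpp asserts guard: QuantizedMultiplierSmallerThanOne stores a float multiplier in [0, 1) as a Q0.31 integer plus a right shift, so that multiplier == (qFixed / 2^31) * 2^-rightShift. A small standalone sketch with a worked value (the encoding is read directly off the hunk above; the example value 0.25 is chosen for illustration):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        const float multiplier = 0.25f; // illustrative value

        int exponent = 0;
        const double q = std::frexp(multiplier, &exponent);   // 0.25 = 0.5 * 2^-1, so q = 0.5, exponent = -1
        const int rightShift = -exponent;                     // 1; non-negative whenever multiplier < 1
        const auto qFixed =
            static_cast<int64_t>(std::round(q * (1ll << 31))); // 2^30, the Q0.31 encoding of 0.5

        // Reconstruct the multiplier from the stored pair.
        const double reconstructed =
            static_cast<double>(qFixed) / (1ll << 31) / (1 << rightShift);
        assert(reconstructed == 0.25); // exact here, since 0.25 is a power of two
        return 0;
    }

This also shows why the asserts in the hunk hold: frexp() returns a mantissa in [0.5, 1), so qFixed <= 2^31, and a multiplier below 1 forces a non-positive exponent, hence m_RightShift >= 0.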
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 562fd3e296..f5aa8f3447 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -15,7 +15,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 3434ccb764..deb3b1f4b2 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,7 +10,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -142,7 +142,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
break;
}
}
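MakeDecoder is a factory that switches on the tensor's DataType, and the asserted default is the catch-all for types with no decoder. A compressed sketch of that dispatch shape, with a hypothetical decoder type standing in for the real ones:

    // Dispatch-shape sketch only; FloatDecoder is a hypothetical stand-in.
    #include <cassert>
    #include <memory>

    enum class DataType { Float32, QAsymmU8 };

    struct FloatDecoder { /* reads float elements from raw tensor memory */ };

    std::unique_ptr<FloatDecoder> MakeDecoderSketch(DataType type)
    {
        switch (type)
        {
            case DataType::Float32:
                return std::make_unique<FloatDecoder>();
            default:
                // Mirrors the ARMNN_ASSERT_MSG(false, ...) default above.
                assert(false && "Unsupported Data Type!");
                return nullptr;
        }
    }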
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index 91ca160ae2..f5e9ec5498 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -8,7 +8,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -22,7 +22,7 @@ void DepthToSpace(const TensorInfo& inputInfo,
unsigned int dataTypeSize)
{
const unsigned int blockSize = descriptor.m_BlockSize;
- BOOST_ASSERT(blockSize != 0u);
+ ARMNN_ASSERT(blockSize != 0u);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int batches = inputShape[0];
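The blockSize != 0 assert guards the shape arithmetic that follows: DepthToSpace moves data from channels into the spatial dimensions, dividing C by blockSize^2 and multiplying H and W by blockSize. A shape-only sketch with illustrative NCHW values:

    // Shape relationship only; illustrative values, NCHW layout assumed.
    #include <cassert>

    int main()
    {
        const unsigned int blockSize = 2;
        const unsigned int inC = 8, inH = 3, inW = 3;
        assert(blockSize != 0u && inC % (blockSize * blockSize) == 0);
        const unsigned int outC = inC / (blockSize * blockSize); // 2
        const unsigned int outH = inH * blockSize;               // 6
        const unsigned int outW = inW * blockSize;               // 6
        assert(outC == 2 && outH == 6 && outW == 6);
    }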
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 63c0405efe..fdc8e30c75 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -16,7 +16,7 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& outputInfo)
{
IgnoreUnused(outputInfo);
- BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
// inputDecoder.Get() dequantizes the data element from whatever
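The element-count assert makes the loop safe; the per-element conversion the decoder performs is affine dequantization, real = scale * (q - offset). A one-element worked example, assuming the usual asymmetric quantization scheme:

    // Illustrative numbers; assumes the common asymmetric scheme.
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const float scale = 0.5f;
        const int32_t offset = 10;
        const uint8_t q = 14;
        const float real = scale * (static_cast<float>(q) - static_cast<float>(offset));
        std::cout << real << '\n'; // 0.5 * (14 - 10) == 2
    }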
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 57cf01e4a1..61a504ec6b 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -5,8 +5,8 @@
#include "DetectionPostProcess.hpp"
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
@@ -213,8 +213,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
// xmax
boxCorners[indexW] = xCentre + halfW;
- BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
- BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+ ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+ ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
}
unsigned int numClassesWithBg = desc.m_NumClasses + 1;
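The two asserts hold by construction whenever the decoded box has positive height and width, because each corner is the centre offset by half the extent. A worked sketch of that centre/size decoding:

    // Centre/size box decoded into corners; illustrative values.
    #include <cassert>

    int main()
    {
        const float yCentre = 0.5f, xCentre = 0.5f, halfH = 0.1f, halfW = 0.2f;
        const float ymin = yCentre - halfH; // 0.4
        const float xmin = xCentre - halfW; // 0.3
        const float ymax = yCentre + halfH; // 0.6
        const float xmax = xCentre + halfW; // 0.7
        assert(ymin < ymax && xmin < xmax); // holds whenever halfH > 0 and halfW > 0
    }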
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index e93987da31..c0524a7719 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -89,7 +89,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
break;
}
}
@@ -107,7 +107,7 @@ inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 02d9b060ef..5a87520f84 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -7,8 +7,6 @@
#include "RefWorkloadUtils.hpp"
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 4cf3a142a0..c23edcd3bd 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -36,7 +36,7 @@ void Gather(const TensorInfo& paramsInfo,
{
unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
- BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+ ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
unsigned int startOffset = indx * paramsProduct;
unsigned int endOffset = startOffset + paramsProduct;
@@ -51,7 +51,7 @@ void Gather(const TensorInfo& paramsInfo,
}
}
- BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+ ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
}
} //namespace armnn
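The two Gather asserts bound each per-index read and confirm the output was filled exactly once. A flattened sketch of the axis-0 gather they guard, with illustrative data:

    // Axis-0 gather over a flattened [3, 2] params tensor; illustrative.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        const std::vector<float> params = {1, 2, 3, 4, 5, 6}; // shape [3, 2]
        const unsigned int dim0 = 3, sliceSize = 2;
        const std::vector<int32_t> indices = {2, 0};

        std::vector<float> output;
        for (const int32_t i : indices)
        {
            assert(i >= 0 && static_cast<unsigned int>(i) < dim0);
            const unsigned int start = static_cast<unsigned int>(i) * sliceSize;
            output.insert(output.end(), params.begin() + start,
                          params.begin() + start + sliceSize);
        }
        assert(output.size() == indices.size() * sliceSize); // every slot written
        // output == {5, 6, 1, 2}
    }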
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 103d62a8df..1998f50c87 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
@@ -35,7 +35,7 @@ void LogSoftmax(Decoder<float>& input,
const unsigned int numDimensions = inputInfo.GetNumDimensions();
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
- BOOST_ASSERT_MSG(axisIsValid,
+ ARMNN_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
IgnoreUnused(axisIsValid);
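The assert message spells out the contract: a signed axis is valid iff it falls in [-numDimensions, numDimensions), with negative values counting back from the last dimension. A sketch of a check with that contract (ValidateAxis itself is defined elsewhere in LogSoftmax.cpp, so this is an assumption about its behaviour):

    // Same contract as the assert message above; sketch, not the real helper.
    #include <cassert>

    bool ValidateAxisSketch(int axis, unsigned int numDimensions)
    {
        const int n = static_cast<int>(numDimensions);
        return axis >= -n && axis < n;
    }

    int main()
    {
        assert(ValidateAxisSketch(-1, 4)); // last dimension, from the back
        assert(ValidateAxisSketch(3, 4));  // last dimension, from the front
        assert(!ValidateAxisSketch(4, 4)); // out of range
    }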
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index f2c0a4fc3f..72080ef042 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -128,7 +128,7 @@ void Mean(const armnn::TensorInfo& inputInfo,
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- BOOST_ASSERT(boost::numeric_cast<float>(current) <
+ ARMNN_ASSERT(boost::numeric_cast<float>(current) <
(std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
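This assert keeps the running product of reduced-axis extents representable as a float, since the mean divides by that count in float arithmetic. The same guard in standalone form:

    // Overflow guard on the accumulated element count; illustrative dims.
    #include <cassert>
    #include <limits>

    int main()
    {
        unsigned int numElementsInAxis = 1;
        const unsigned int resolvedDims[] = {2, 3, 4};
        for (const unsigned int current : resolvedDims)
        {
            assert(static_cast<float>(current) <
                   std::numeric_limits<float>::max() / static_cast<float>(numElementsInAxis));
            numElementsInAxis *= current;
        }
        assert(numElementsInAxis == 24);
    }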
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 3506198410..d3e65e6615 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
@@ -24,10 +24,10 @@ void RefConstantWorkload::PostAllocationConfigure()
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
- BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+ ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
outputInfo.GetNumBytes());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index ac82db90e5..f8c3548905 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -32,7 +32,7 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
void RefFullyConnectedWorkload::PostAllocationConfigure()
{
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
m_InputShape = inputInfo.GetShape();
m_InputDecoder = MakeDecoder<float>(inputInfo);
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a987e79dda..a2ace13144 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,7 +12,7 @@
#include <Profiling.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -27,8 +27,8 @@ void RefLogSoftmaxWorkload::Execute() const
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- BOOST_ASSERT(decoder != nullptr);
- BOOST_ASSERT(encoder != nullptr);
+ ARMNN_ASSERT(decoder != nullptr);
+ ARMNN_ASSERT(encoder != nullptr);
LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
}
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index be36f40633..fc859506a3 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -26,7 +26,7 @@ void RefStackWorkload::Execute() const
if (!m_Data.m_Parameters.m_Axis)
{
float* output = GetOutputTensorData<float>(0, m_Data);
- BOOST_ASSERT(output != nullptr);
+ ARMNN_ASSERT(output != nullptr);
unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index bfd3c284ae..e994a09230 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -27,7 +27,7 @@ void RefStridedSliceWorkload::Execute() const
DataType inputDataType = inputInfo.GetDataType();
DataType outputDataType = outputInfo.GetDataType();
- BOOST_ASSERT(inputDataType == outputDataType);
+ ARMNN_ASSERT(inputDataType == outputDataType);
IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index 0223cdc56a..e972524f11 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,9 +5,9 @@
#include "Slice.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -22,11 +22,11 @@ void Slice(const TensorInfo& inputInfo,
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int numDims = inputShape.GetNumDimensions();
- BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
- BOOST_ASSERT(descriptor.m_Size.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Size.size() == numDims);
constexpr unsigned int maxNumDims = 4;
- BOOST_ASSERT(numDims <= maxNumDims);
+ ARMNN_ASSERT(numDims <= maxNumDims);
std::vector<unsigned int> paddedInput(4);
std::vector<unsigned int> paddedBegin(4);
@@ -65,10 +65,10 @@ void Slice(const TensorInfo& inputInfo,
unsigned int size2 = paddedSize[2];
unsigned int size3 = paddedSize[3];
- BOOST_ASSERT(begin0 + size0 <= dim0);
- BOOST_ASSERT(begin1 + size1 <= dim1);
- BOOST_ASSERT(begin2 + size2 <= dim2);
- BOOST_ASSERT(begin3 + size3 <= dim3);
+ ARMNN_ASSERT(begin0 + size0 <= dim0);
+ ARMNN_ASSERT(begin1 + size1 <= dim1);
+ ARMNN_ASSERT(begin2 + size2 <= dim2);
+ ARMNN_ASSERT(begin3 + size3 <= dim3);
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
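The four begin + size <= dim asserts check that the requested window stays inside the rank-padded input; lower-rank slices are first padded to the fixed 4-D form. The same invariant as a loop, with illustrative padded values:

    // Window-containment invariant in loop form; values are illustrative.
    #include <cassert>

    int main()
    {
        const unsigned int dims[4]  = {1, 1, 4, 3}; // padded input shape
        const unsigned int begin[4] = {0, 0, 1, 0}; // padded m_Begin
        const unsigned int size[4]  = {1, 1, 2, 3}; // padded m_Size
        for (unsigned int d = 0; d < 4; ++d)
        {
            assert(begin[d] + size[d] <= dims[d]);
        }
    }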
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 5036389a10..32eca84849 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -16,9 +16,9 @@ namespace armnn
/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
{
- BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
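The conversion this last line begins maps the signed, range-checked axis to an unsigned dimension index, with a negative axis counting back from the last dimension. A worked sketch of that normalisation (the full expression is cut off above, so the exact form is an assumption):

    // Signed-to-unsigned axis normalisation; mirrors the pattern the
    // truncated line above starts. Illustrative values.
    #include <cassert>

    int main()
    {
        const int numDimensions = 4;
        const int axis = -1; // already checked against [-4, 4)
        const unsigned int uAxis = axis < 0
            ? static_cast<unsigned int>(numDimensions + axis)
            : static_cast<unsigned int>(axis);
        assert(uAxis == 3); // -1 selects the last of four dimensions
    }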
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 3bddfb0cab..09edc5e0f5 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -6,8 +6,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include "Splitter.hpp"
#include <cmath>
@@ -47,7 +46,7 @@ void Split(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 271c6fdeb8..26309b080f 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -8,7 +8,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -38,7 +38,7 @@ void Splitter(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
@@ -67,10 +67,10 @@ void Splitter(const SplitterQueueDescriptor& data)
//We are within the view, to copy input data to the output corresponding to this view.
DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
- BOOST_ASSERT(outputData);
+ ARMNN_ASSERT(outputData);
const DataType* inputData = GetInputTensorData<DataType>(0, data);
- BOOST_ASSERT(inputData);
+ ARMNN_ASSERT(inputData);
outputData[outIndex] = inputData[index];
}
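Both Splitter variants walk every input element and copy it into whichever output view contains it; the insideView test checks, per dimension, that the coordinate lies in [origin, origin + extent). A 2-D sketch of that containment test with illustrative names and values:

    // Per-element view-containment test; illustrative 2-D values.
    #include <cassert>

    int main()
    {
        const unsigned int coords[2]  = {2, 1}; // element position in the input
        const unsigned int origin[2]  = {2, 0}; // where this output view starts
        const unsigned int extents[2] = {2, 3}; // this output view's shape

        bool insideView = true;
        for (unsigned int d = 0; d < 2; ++d)
        {
            if (coords[d] < origin[d] || coords[d] >= origin[d] + extents[d])
            {
                insideView = false;
            }
        }
        assert(insideView);
    }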
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 62f06dc5ec..b00b049ff6 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -7,7 +7,8 @@
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <cstring>
@@ -20,12 +21,12 @@ namespace
void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
{
- BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+ ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
boost::numeric_cast<unsigned int>(p.m_Begin.size());
- BOOST_ASSERT(dimCount >= beginIndicesCount);
+ ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
p.m_Begin.resize(dimCount);
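PadParams grows a lower-rank descriptor to the working rank: after the two asserts, padCount identity entries end up in front so the begin/end/stride vectors all have dimCount elements. An illustrative sketch of the same idea for the begin vector only (the real body also handles m_End and m_Stride):

    // Rank-padding sketch; illustrative, not the exact PadParams body.
    #include <cassert>
    #include <vector>

    int main()
    {
        const unsigned int dimCount = 4;
        std::vector<int> begin = {1, 0}; // 2-D descriptor
        const unsigned int padCount =
            dimCount - static_cast<unsigned int>(begin.size());
        begin.insert(begin.begin(), padCount, 0); // now {0, 0, 1, 0}
        assert(begin.size() == dimCount);
    }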
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e03c42fe60..5d66fd5273 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -25,7 +25,7 @@ public:
, m_Data(data)
, m_DataLayout(dataLayout)
{
- BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+ ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const