author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-04-01 16:51:23 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-04-06 09:06:01 +0100
commit     ac2770a4bb6461bfbddec928bb6208f26f898f02 (patch)
tree       c72f67f648b7aca2f4bccf69b05d185bf5f9ccad /src/backends/reference
parent     7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff)
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
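For context, a minimal sketch of what the replacement header can provide — an illustrative assumption, not the verbatim ArmNN source; the real definitions live in armnn/utility/Assert.hpp. Parenthesising the condition before conjoining the message string is one plausible reading of the "multiple conditions" fix the message mentions, since an unparenthesised expansion rebinds operator precedence:

// Hypothetical sketch of armnn/utility/Assert.hpp -- illustration only.
#pragma once

#include <cassert>

// ARMNN_ASSERT mirrors assert(); ARMNN_ASSERT_MSG conjoins a string literal
// so the message appears in the abort diagnostic. Both compile away under
// NDEBUG, exactly as assert() does.
//
// Wrapping COND in its own parentheses keeps compound conditions intact:
// ARMNN_ASSERT_MSG(a || b, "msg") expands to assert((a || b) && "msg")
// rather than assert(a || b && "msg"), which would bind as a || (b && "msg").
#define ARMNN_ASSERT(COND)          assert(COND)
#define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && MSG)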
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 4
-rw-r--r--  src/backends/reference/RefMemoryManager.cpp | 12
-rw-r--r--  src/backends/reference/RefTensorHandle.cpp | 10
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp | 25
-rw-r--r--  src/backends/reference/workloads/BatchToSpaceNd.cpp | 10
-rw-r--r--  src/backends/reference/workloads/Concatenate.cpp | 2
-rw-r--r--  src/backends/reference/workloads/ConvImpl.cpp | 12
-rw-r--r--  src/backends/reference/workloads/ConvImpl.hpp | 1
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp | 4
-rw-r--r--  src/backends/reference/workloads/DepthToSpace.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp | 2
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp | 6
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp | 6
-rw-r--r--  src/backends/reference/workloads/FullyConnected.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Gather.cpp | 4
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Mean.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefConstantWorkload.cpp | 6
-rw-r--r--  src/backends/reference/workloads/RefFullyConnectedWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp | 6
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Slice.cpp | 16
-rw-r--r--  src/backends/reference/workloads/Softmax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Splitter.cpp | 5
-rw-r--r--  src/backends/reference/workloads/Splitter.hpp | 8
-rw-r--r--  src/backends/reference/workloads/StridedSlice.cpp | 7
-rw-r--r--  src/backends/reference/workloads/TensorBufferArrayView.hpp | 4
28 files changed, 84 insertions, 88 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 607c86b112..25d639a38a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -348,7 +348,7 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
"Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference concatenation: input type not supported");
@@ -1864,7 +1864,7 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
"Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference stack: input type not supported");
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 4f15e39ee1..76054e41e1 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -4,7 +4,7 @@
//
#include "RefMemoryManager.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
{
- BOOST_ASSERT(pool);
+ ARMNN_ASSERT(pool);
m_FreePools.push_back(pool);
}
@@ -75,25 +75,25 @@ RefMemoryManager::Pool::~Pool()
void* RefMemoryManager::Pool::GetPointer()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void RefMemoryManager::Pool::Acquire()
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void RefMemoryManager::Pool::Release()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 84a74edc1d..7d86b110a7 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()
void RefTensorHandle::Manage()
{
- BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
@@ -84,7 +84,7 @@ void* RefTensorHandle::GetPointer() const
}
else
{
- BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+ ARMNN_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
return m_MemoryManager->GetPointer(m_Pool);
}
}
@@ -92,14 +92,14 @@ void* RefTensorHandle::GetPointer() const
void RefTensorHandle::CopyOutTo(void* dest) const
{
const void *src = GetPointer();
- BOOST_ASSERT(src);
+ ARMNN_ASSERT(src);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
void RefTensorHandle::CopyInFrom(const void* src)
{
void *dest = GetPointer();
- BOOST_ASSERT(dest);
+ ARMNN_ASSERT(dest);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index f43e8b67a9..be20644ab7 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
@@ -78,28 +77,28 @@ public:
TypedIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
return *this;
}
TypedIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
return *this;
}
TypedIterator& operator-=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= increment;
return *this;
}
TypedIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -107,7 +106,7 @@ public:
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
IgnoreUnused(axisIndex);
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -504,7 +503,7 @@ public:
// This should be called to set index for per-axis Encoder/Decoder
PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = axisIndex;
return *this;
@@ -519,7 +518,7 @@ public:
PerAxisIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -527,7 +526,7 @@ public:
PerAxisIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -535,7 +534,7 @@ public:
PerAxisIterator& operator-=(const unsigned int decrement) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= decrement;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -543,7 +542,7 @@ public:
PerAxisIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 7efdb9b75c..bf7de1b04c 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -42,11 +42,11 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
{
TensorShape inputShape = inputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
TensorShape outputShape = outputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+ ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
const unsigned int inputBatchSize = inputShape[0];
const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
@@ -55,12 +55,12 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
- BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
const unsigned int blockShapeHeight = blockShape[0];
const unsigned int blockShapeWidth = blockShape[1];
- BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
const unsigned int cropsTop = cropsData[0].first;
const unsigned int cropsLeft = cropsData[1].first;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index bb55424c0c..a85e34ee61 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -38,7 +38,7 @@ void Concatenate(const ConcatQueueDescriptor &data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 0c13e3ba0d..9d2f410a25 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -5,7 +5,7 @@
#include "ConvImpl.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cmath>
#include <limits>
@@ -15,7 +15,7 @@ namespace armnn
QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
{
- BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+ ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
if (multiplier == 0.0f)
{
m_Multiplier = 0;
@@ -26,14 +26,14 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
const double q = std::frexp(multiplier, &m_RightShift);
m_RightShift = -m_RightShift;
int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
- BOOST_ASSERT(qFixed <= (1ll << 31));
+ ARMNN_ASSERT(qFixed <= (1ll << 31));
if (qFixed == (1ll << 31))
{
qFixed /= 2;
--m_RightShift;
}
- BOOST_ASSERT(m_RightShift >= 0);
- BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+ ARMNN_ASSERT(m_RightShift >= 0);
+ ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
m_Multiplier = static_cast<int32_t>(qFixed);
}
}
@@ -61,7 +61,7 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int
int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
{
- BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+ ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
int32_t mask = (1 << exponent) - 1;
int32_t remainder = x & mask;
int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 562fd3e296..f5aa8f3447 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -15,7 +15,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 3434ccb764..deb3b1f4b2 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,7 +10,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -142,7 +142,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index 91ca160ae2..f5e9ec5498 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -8,7 +8,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -22,7 +22,7 @@ void DepthToSpace(const TensorInfo& inputInfo,
unsigned int dataTypeSize)
{
const unsigned int blockSize = descriptor.m_BlockSize;
- BOOST_ASSERT(blockSize != 0u);
+ ARMNN_ASSERT(blockSize != 0u);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 63c0405efe..fdc8e30c75 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -16,7 +16,7 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& outputInfo)
{
IgnoreUnused(outputInfo);
- BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
// inputDecoder.Get() dequantizes the data element from whatever
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 57cf01e4a1..61a504ec6b 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -5,8 +5,8 @@
#include "DetectionPostProcess.hpp"
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
@@ -213,8 +213,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
// xmax
boxCorners[indexW] = xCentre + halfW;
- BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
- BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+ ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+ ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
}
unsigned int numClassesWithBg = desc.m_NumClasses + 1;
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index e93987da31..c0524a7719 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -89,7 +89,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
break;
}
}
@@ -107,7 +107,7 @@ inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 02d9b060ef..5a87520f84 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -7,8 +7,6 @@
#include "RefWorkloadUtils.hpp"
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 4cf3a142a0..c23edcd3bd 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -36,7 +36,7 @@ void Gather(const TensorInfo& paramsInfo,
{
unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
- BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+ ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
unsigned int startOffset = indx * paramsProduct;
unsigned int endOffset = startOffset + paramsProduct;
@@ -51,7 +51,7 @@ void Gather(const TensorInfo& paramsInfo,
}
}
- BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+ ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
}
} //namespace armnn
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 103d62a8df..1998f50c87 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
@@ -35,7 +35,7 @@ void LogSoftmax(Decoder<float>& input,
const unsigned int numDimensions = inputInfo.GetNumDimensions();
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
- BOOST_ASSERT_MSG(axisIsValid,
+ ARMNN_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
IgnoreUnused(axisIsValid);
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index f2c0a4fc3f..72080ef042 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -128,7 +128,7 @@ void Mean(const armnn::TensorInfo& inputInfo,
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- BOOST_ASSERT(boost::numeric_cast<float>(current) <
+ ARMNN_ASSERT(boost::numeric_cast<float>(current) <
(std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 3506198410..d3e65e6615 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
@@ -24,10 +24,10 @@ void RefConstantWorkload::PostAllocationConfigure()
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
- BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+ ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
outputInfo.GetNumBytes());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index ac82db90e5..f8c3548905 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -32,7 +32,7 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
void RefFullyConnectedWorkload::PostAllocationConfigure()
{
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
m_InputShape = inputInfo.GetShape();
m_InputDecoder = MakeDecoder<float>(inputInfo);
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a987e79dda..a2ace13144 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,7 +12,7 @@
#include <Profiling.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -27,8 +27,8 @@ void RefLogSoftmaxWorkload::Execute() const
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- BOOST_ASSERT(decoder != nullptr);
- BOOST_ASSERT(encoder != nullptr);
+ ARMNN_ASSERT(decoder != nullptr);
+ ARMNN_ASSERT(encoder != nullptr);
LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
}
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index be36f40633..fc859506a3 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -26,7 +26,7 @@ void RefStackWorkload::Execute() const
if (!m_Data.m_Parameters.m_Axis)
{
float* output = GetOutputTensorData<float>(0, m_Data);
- BOOST_ASSERT(output != nullptr);
+ ARMNN_ASSERT(output != nullptr);
unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index bfd3c284ae..e994a09230 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -27,7 +27,7 @@ void RefStridedSliceWorkload::Execute() const
DataType inputDataType = inputInfo.GetDataType();
DataType outputDataType = outputInfo.GetDataType();
- BOOST_ASSERT(inputDataType == outputDataType);
+ ARMNN_ASSERT(inputDataType == outputDataType);
IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index 0223cdc56a..e972524f11 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,9 +5,9 @@
#include "Slice.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -22,11 +22,11 @@ void Slice(const TensorInfo& inputInfo,
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int numDims = inputShape.GetNumDimensions();
- BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
- BOOST_ASSERT(descriptor.m_Size.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Size.size() == numDims);
constexpr unsigned int maxNumDims = 4;
- BOOST_ASSERT(numDims <= maxNumDims);
+ ARMNN_ASSERT(numDims <= maxNumDims);
std::vector<unsigned int> paddedInput(4);
std::vector<unsigned int> paddedBegin(4);
@@ -65,10 +65,10 @@ void Slice(const TensorInfo& inputInfo,
unsigned int size2 = paddedSize[2];
unsigned int size3 = paddedSize[3];
- BOOST_ASSERT(begin0 + size0 <= dim0);
- BOOST_ASSERT(begin1 + size1 <= dim1);
- BOOST_ASSERT(begin2 + size2 <= dim2);
- BOOST_ASSERT(begin3 + size3 <= dim3);
+ ARMNN_ASSERT(begin0 + size0 <= dim0);
+ ARMNN_ASSERT(begin1 + size1 <= dim1);
+ ARMNN_ASSERT(begin2 + size2 <= dim2);
+ ARMNN_ASSERT(begin3 + size3 <= dim3);
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 5036389a10..32eca84849 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -16,9 +16,9 @@ namespace armnn
/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
{
- BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 3bddfb0cab..09edc5e0f5 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -6,8 +6,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include "Splitter.hpp"
#include <cmath>
@@ -47,7 +46,7 @@ void Split(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 271c6fdeb8..26309b080f 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -8,7 +8,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -38,7 +38,7 @@ void Splitter(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
@@ -67,10 +67,10 @@ void Splitter(const SplitterQueueDescriptor& data)
//We are within the view, to copy input data to the output corresponding to this view.
DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
- BOOST_ASSERT(outputData);
+ ARMNN_ASSERT(outputData);
const DataType* inputData = GetInputTensorData<DataType>(0, data);
- BOOST_ASSERT(inputData);
+ ARMNN_ASSERT(inputData);
outputData[outIndex] = inputData[index];
}
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 62f06dc5ec..b00b049ff6 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -7,7 +7,8 @@
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <cstring>
@@ -20,12 +21,12 @@ namespace
void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
{
- BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+ ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
boost::numeric_cast<unsigned int>(p.m_Begin.size());
- BOOST_ASSERT(dimCount >= beginIndicesCount);
+ ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
p.m_Begin.resize(dimCount);
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e03c42fe60..5d66fd5273 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -25,7 +25,7 @@ public:
, m_Data(data)
, m_DataLayout(dataLayout)
{
- BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+ ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const