author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-04-01 16:51:23 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-04-06 09:06:01 +0100
commit    ac2770a4bb6461bfbddec928bb6208f26f898f02 (patch)
tree      c72f67f648b7aca2f4bccf69b05d185bf5f9ccad
parent    7ee5d2c3b3cee5a924ed6347fef613ee07b5aca7 (diff)
download  armnn-ac2770a4bb6461bfbddec928bb6208f26f898f02.tar.gz
IVGCVSW-4485 Remove Boost assert
* Change boost assert to armnn assert
* Change include file to armnn assert
* Fix ARMNN_ASSERT_MSG issue with multiple conditions
* Change BOOST_ASSERT to BOOST_TEST where appropriate
* Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
-rw-r--r--  include/armnn/utility/Assert.hpp | 2
-rw-r--r--  include/armnnUtils/DataLayoutIndexed.hpp | 10
-rw-r--r--  include/armnnUtils/TensorUtils.hpp | 2
-rw-r--r--  src/armnn/Descriptors.cpp | 4
-rw-r--r--  src/armnn/Graph.cpp | 36
-rw-r--r--  src/armnn/Graph.hpp | 18
-rw-r--r--  src/armnn/InternalTypes.cpp | 4
-rw-r--r--  src/armnn/Layer.cpp | 20
-rw-r--r--  src/armnn/LayerSupport.cpp | 6
-rw-r--r--  src/armnn/LoadedNetwork.cpp | 28
-rw-r--r--  src/armnn/Logging.cpp | 4
-rw-r--r--  src/armnn/Network.cpp | 28
-rw-r--r--  src/armnn/NetworkQuantizerUtils.cpp | 2
-rw-r--r--  src/armnn/NetworkQuantizerUtils.hpp | 7
-rw-r--r--  src/armnn/NetworkUtils.cpp | 2
-rw-r--r--  src/armnn/Optimizer.cpp | 2
-rw-r--r--  src/armnn/OutputHandler.cpp | 2
-rw-r--r--  src/armnn/OutputHandler.hpp | 2
-rw-r--r--  src/armnn/OverrideInputRangeVisitor.cpp | 2
-rw-r--r--  src/armnn/Profiling.cpp | 15
-rw-r--r--  src/armnn/QuantizerVisitor.cpp | 16
-rw-r--r--  src/armnn/Runtime.cpp | 2
-rw-r--r--  src/armnn/SubgraphView.cpp | 4
-rw-r--r--  src/armnn/SubgraphViewSelector.cpp | 10
-rw-r--r--  src/armnn/Tensor.cpp | 5
-rw-r--r--  src/armnn/TypesUtils.cpp | 10
-rw-r--r--  src/armnn/layers/AbsLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ActivationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp | 6
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp | 10
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp | 10
-rw-r--r--  src/armnn/layers/ComparisonLayer.cpp | 8
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp | 8
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp | 12
-rw-r--r--  src/armnn/layers/DebugLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.cpp | 4
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 12
-rw-r--r--  src/armnn/layers/DequantizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp | 4
-rw-r--r--  src/armnn/layers/ElementwiseBaseLayer.cpp | 11
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.cpp | 4
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FloorLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp | 10
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LstmLayer.cpp | 50
-rw-r--r--  src/armnn/layers/MeanLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MergeLayer.cpp | 4
-rw-r--r--  src/armnn/layers/NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PermuteLayer.cpp | 4
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp | 10
-rw-r--r--  src/armnn/layers/PreluLayer.cpp | 10
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp | 52
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp | 28
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp | 4
-rw-r--r--  src/armnn/layers/RsqrtLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SliceLayer.cpp | 5
-rw-r--r--  src/armnn/layers/SoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp | 4
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp | 4
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp | 6
-rw-r--r--  src/armnn/layers/StackLayer.cpp | 4
-rw-r--r--  src/armnn/layers/StridedSliceLayer.cpp | 4
-rw-r--r--  src/armnn/layers/SwitchLayer.cpp | 4
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp | 16
-rw-r--r--  src/armnn/layers/TransposeLayer.cpp | 4
-rw-r--r--  src/armnn/optimizations/FoldPadIntoConvolution2d.hpp | 8
-rw-r--r--  src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp | 4
-rw-r--r--  src/armnn/optimizations/OptimizeInverseConversions.hpp | 2
-rw-r--r--  src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp | 4
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 16
-rw-r--r--  src/armnn/test/QuantizerTest.cpp | 2
-rw-r--r--  src/armnn/test/TensorHelpers.hpp | 4
-rw-r--r--  src/armnn/test/TestUtils.cpp | 6
-rw-r--r--  src/armnnCaffeParser/CaffeParser.cpp | 15
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp | 26
-rw-r--r--  src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp | 6
-rw-r--r--  src/armnnOnnxParser/OnnxParser.cpp | 35
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp | 56
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp | 6
-rw-r--r--  src/armnnTfLiteParser/test/Unsupported.cpp | 6
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp | 32
-rw-r--r--  src/armnnTfParser/test/AddN.cpp | 4
-rw-r--r--  src/armnnTfParser/test/Convolution2d.cpp | 2
-rw-r--r--  src/armnnUtils/DotSerializer.cpp | 1
-rw-r--r--  src/armnnUtils/FloatingPointConverter.cpp | 18
-rw-r--r--  src/armnnUtils/GraphTopologicalSort.hpp | 1
-rw-r--r--  src/armnnUtils/ModelAccuracyChecker.cpp | 2
-rw-r--r--  src/armnnUtils/ModelAccuracyChecker.hpp | 2
-rw-r--r--  src/armnnUtils/TensorUtils.cpp | 12
-rw-r--r--  src/armnnUtils/test/ParserHelperTest.cpp | 28
-rw-r--r--  src/armnnUtils/test/PrototxtConversionsTest.cpp | 30
-rw-r--r--  src/backends/aclCommon/ArmComputeTensorUtils.cpp | 2
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp | 5
-rw-r--r--  src/backends/aclCommon/BaseMemoryManager.cpp | 14
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.cpp | 4
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.hpp | 10
-rw-r--r--  src/backends/backendsCommon/LayerSupportRules.hpp | 4
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp | 2
-rw-r--r--  src/backends/backendsCommon/Workload.hpp | 18
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 2
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp | 16
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.cpp | 8
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/MockBackend.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/WorkloadTestUtils.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp | 18
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 20
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp | 2
-rw-r--r--  src/backends/cl/ClBackendContext.cpp | 3
-rw-r--r--  src/backends/cl/ClContextControl.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClConstantWorkload.cpp | 4
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp | 2
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp | 6
-rw-r--r--  src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp | 2
-rw-r--r--  src/backends/cl/workloads/ClWorkloadUtils.hpp | 4
-rw-r--r--  src/backends/neon/NeonInterceptorScheduler.cpp | 2
-rw-r--r--  src/backends/neon/NeonTensorHandle.hpp | 4
-rw-r--r--  src/backends/neon/NeonTimer.cpp | 5
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonWorkloadUtils.hpp | 4
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 4
-rw-r--r--  src/backends/reference/RefMemoryManager.cpp | 12
-rw-r--r--  src/backends/reference/RefTensorHandle.cpp | 10
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp | 25
-rw-r--r--  src/backends/reference/workloads/BatchToSpaceNd.cpp | 10
-rw-r--r--  src/backends/reference/workloads/Concatenate.cpp | 2
-rw-r--r--  src/backends/reference/workloads/ConvImpl.cpp | 12
-rw-r--r--  src/backends/reference/workloads/ConvImpl.hpp | 1
-rw-r--r--  src/backends/reference/workloads/Decoders.hpp | 4
-rw-r--r--  src/backends/reference/workloads/DepthToSpace.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp | 2
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp | 6
-rw-r--r--  src/backends/reference/workloads/Encoders.hpp | 6
-rw-r--r--  src/backends/reference/workloads/FullyConnected.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Gather.cpp | 4
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Mean.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefConstantWorkload.cpp | 6
-rw-r--r--  src/backends/reference/workloads/RefFullyConnectedWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp | 6
-rw-r--r--  src/backends/reference/workloads/RefStackWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Slice.cpp | 16
-rw-r--r--  src/backends/reference/workloads/Softmax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/Splitter.cpp | 5
-rw-r--r--  src/backends/reference/workloads/Splitter.hpp | 8
-rw-r--r--  src/backends/reference/workloads/StridedSlice.cpp | 7
-rw-r--r--  src/backends/reference/workloads/TensorBufferArrayView.hpp | 4
-rw-r--r--  src/profiling/CommandHandler.cpp | 2
-rw-r--r--  src/profiling/CommandHandlerRegistry.cpp | 7
-rw-r--r--  src/profiling/CounterDirectory.cpp | 57
-rw-r--r--  src/profiling/FileOnlyProfilingConnection.cpp | 2
-rw-r--r--  src/profiling/ProfilingService.cpp | 26
-rw-r--r--  src/profiling/ProfilingService.hpp | 4
-rw-r--r--  src/profiling/ProfilingUtils.cpp | 48
-rw-r--r--  src/profiling/SendCounterPacket.cpp | 23
-rw-r--r--  src/profiling/SendTimelinePacket.hpp | 4
-rw-r--r--  src/profiling/test/ProfilingMocks.hpp | 24
-rw-r--r--  src/profiling/test/ProfilingTestUtils.cpp | 12
-rw-r--r--  src/profiling/test/SendCounterPacketTests.cpp | 18
-rw-r--r--  src/profiling/test/SendCounterPacketTests.hpp | 2
-rw-r--r--  tests/CaffePreprocessor.cpp | 1
-rw-r--r--  tests/DeepSpeechV1InferenceTest.hpp | 8
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp | 2
-rw-r--r--  tests/ImagePreprocessor.cpp | 1
-rw-r--r--  tests/InferenceModel.hpp | 3
-rw-r--r--  tests/InferenceTest.cpp | 7
-rw-r--r--  tests/InferenceTest.inl | 10
-rw-r--r--  tests/InferenceTestImage.cpp | 3
-rw-r--r--  tests/MnistDatabase.cpp | 2
-rw-r--r--  tests/MobileNetSsdInferenceTest.hpp | 10
-rw-r--r--  tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp | 2
-rw-r--r--  tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp | 2
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 2
-rw-r--r--  tests/YoloDatabase.cpp | 1
-rw-r--r--  tests/YoloInferenceTest.hpp | 6
-rw-r--r--  tests/profiling/gatordmock/GatordMockService.cpp | 2
-rw-r--r--  tests/profiling/gatordmock/tests/GatordMockTests.cpp | 4
194 files changed, 800 insertions(+), 808 deletions(-)
diff --git a/include/armnn/utility/Assert.hpp b/include/armnn/utility/Assert.hpp
index 4d2f47b90b..455775f9b7 100644
--- a/include/armnn/utility/Assert.hpp
+++ b/include/armnn/utility/Assert.hpp
@@ -12,7 +12,7 @@ namespace armnn
#ifndef NDEBUG
# define ARMNN_ASSERT(COND) assert(COND)
-# define ARMNN_ASSERT_MSG(COND, MSG) assert(COND && MSG)
+# define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && MSG)
#else
# define ARMNN_ASSERT(COND)
# define ARMNN_ASSERT_MSG(COND, MSG)
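
[Note] The added parentheses above are the ARMNN_ASSERT_MSG fix called out in the commit message. Without them, a condition built from multiple sub-conditions joined by an operator of lower precedence than && is grouped together with the message string. A minimal sketch of the problem this plausibly addresses (the helper macros, conditions, and message below are illustrative, not armnn code):

    #include <cassert>

    #define ASSERT_MSG_OLD(COND, MSG) assert(COND && MSG)     // pre-patch form
    #define ASSERT_MSG_NEW(COND, MSG) assert((COND) && MSG)   // patched form

    void Check(bool cond1, bool cond2)
    {
        // Old expansion: assert(cond1 || cond2 && "...") parses as
        // assert(cond1 || (cond2 && "...")) because && binds tighter than ||,
        // which draws -Wparentheses/-Wlogical-op-parentheses on gcc/clang.
        ASSERT_MSG_OLD(cond1 || cond2, "at least one condition must hold");

        // New expansion: assert((cond1 || cond2) && "...") keeps the intended
        // grouping; the string literal is always true and exists only so the
        // message appears in the assertion output.
        ASSERT_MSG_NEW(cond1 || cond2, "at least one condition must hold");
    }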
diff --git a/include/armnnUtils/DataLayoutIndexed.hpp b/include/armnnUtils/DataLayoutIndexed.hpp
index c6701f7d5c..e377cc5a31 100644
--- a/include/armnnUtils/DataLayoutIndexed.hpp
+++ b/include/armnnUtils/DataLayoutIndexed.hpp
@@ -8,7 +8,7 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnnUtils
{
@@ -28,12 +28,12 @@ public:
unsigned int batchIndex, unsigned int channelIndex,
unsigned int heightIndex, unsigned int widthIndex) const
{
- BOOST_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
- BOOST_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
+ ARMNN_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
+ ARMNN_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
( shape[m_ChannelsIndex] == 0 && channelIndex == 0) );
- BOOST_ASSERT( heightIndex < shape[m_HeightIndex] ||
+ ARMNN_ASSERT( heightIndex < shape[m_HeightIndex] ||
( shape[m_HeightIndex] == 0 && heightIndex == 0) );
- BOOST_ASSERT( widthIndex < shape[m_WidthIndex] ||
+ ARMNN_ASSERT( widthIndex < shape[m_WidthIndex] ||
( shape[m_WidthIndex] == 0 && widthIndex == 0) );
/// Offset the given indices appropriately depending on the data layout
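
[Note] A hedged usage sketch of the method these assertions guard, assuming the GetIndex signature shown in the hunk; the shape and index values are made up:

    #include <armnn/Tensor.hpp>
    #include <armnnUtils/DataLayoutIndexed.hpp>

    void Example()
    {
        armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
        armnn::TensorShape shape({ 1, 4, 4, 3 });   // N, H, W, C

        // In-range indices pass all four ARMNN_ASSERTs and yield a flat offset.
        unsigned int offset = nhwc.GetIndex(shape, 0 /*batch*/, 2 /*channel*/,
                                            3 /*height*/, 1 /*width*/);
        (void)offset;

        // An out-of-range index (e.g. channelIndex == 3 when C == 3) fires the
        // matching assert in debug builds; with NDEBUG the checks compile away.
    }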
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index fbfb8f4e1e..cc5f780f10 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -7,8 +7,6 @@
#include <armnn/TypesUtils.hpp>
-#include <boost/assert.hpp>
-
namespace armnnUtils
{
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 95f9b5dd2b..8f4df79428 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -5,6 +5,8 @@
#include "armnn/Descriptors.hpp"
#include "armnn/Logging.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <algorithm>
#include <array>
#include <vector>
@@ -195,7 +197,7 @@ const uint32_t* OriginsDescriptor::GetViewOrigin(uint32_t idx) const
// Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering)
{
- BOOST_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
+ ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
"elements in the new ordering array");
std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 0d326adae7..78b08ecace 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -13,9 +13,9 @@
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <unordered_map>
@@ -142,7 +142,7 @@ Status Graph::SerializeToDot(std::ostream& stream)
Status Graph::AllocateDynamicBuffers()
{
// Layers must be sorted in topological order
- BOOST_ASSERT(m_LayersInOrder);
+ ARMNN_ASSERT(m_LayersInOrder);
std::unordered_set<const ITensorHandle*> preallocatedTensors;
std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
@@ -268,7 +268,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
auto MayNeedCompatibilityLayer = [](const Layer& layer)
{
// All layers should have been associated with a valid compute device at this point.
- BOOST_ASSERT(layer.GetBackendId() != Compute::Undefined);
+ ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
// Does not need another compatibility layer if a copy or import layer is already present.
return layer.GetType() != LayerType::MemCopy &&
layer.GetType() != LayerType::MemImport;
@@ -282,7 +282,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
if (!MayNeedCompatibilityLayer(*srcLayer))
{
@@ -299,10 +299,10 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
{
InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
- BOOST_ASSERT(dstInputSlot);
+ ARMNN_ASSERT(dstInputSlot);
EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
- BOOST_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
+ ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
"Undefined memory strategy found while adding copy layers for compatibility");
const Layer& dstLayer = dstInputSlot->GetOwningLayer();
@@ -325,7 +325,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
}
else
{
- BOOST_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
+ ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
}
@@ -395,7 +395,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- BOOST_ASSERT(substituteLayer != nullptr);
+ ARMNN_ASSERT(substituteLayer != nullptr);
ReplaceSubgraphConnections(subgraph, substituteLayer);
EraseSubgraphLayers(subgraph);
@@ -420,7 +420,7 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& subst
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- BOOST_ASSERT(substituteLayer != nullptr);
+ ARMNN_ASSERT(substituteLayer != nullptr);
// Create a new sub-graph with only the given layer, using
// the given sub-graph as a reference of which parent graph to use
@@ -430,13 +430,13 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectabl
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
- BOOST_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+ ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
IgnoreUnused(layer);
- BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
+ ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
@@ -449,8 +449,8 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
- BOOST_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
- BOOST_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
+ ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
+ ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
// Disconnect the sub-graph and replace it with the substitute sub-graph
@@ -458,14 +458,14 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
{
InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
- BOOST_ASSERT(subgraphInputSlot);
+ ARMNN_ASSERT(subgraphInputSlot);
IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
- BOOST_ASSERT(connectedOutputSlot);
+ ARMNN_ASSERT(connectedOutputSlot);
connectedOutputSlot->Disconnect(*subgraphInputSlot);
IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
- BOOST_ASSERT(substituteInputSlot);
+ ARMNN_ASSERT(substituteInputSlot);
connectedOutputSlot->Connect(*substituteInputSlot);
}
@@ -473,10 +473,10 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
{
OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
- BOOST_ASSERT(subgraphOutputSlot);
+ ARMNN_ASSERT(subgraphOutputSlot);
OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
- BOOST_ASSERT(substituteOutputSlot);
+ ARMNN_ASSERT(substituteOutputSlot);
subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
}
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 63bc8d062c..00ab8deaa0 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -11,6 +11,7 @@
#include <armnn/TensorFwd.hpp>
#include <armnn/NetworkFwd.hpp>
#include <armnn/Exceptions.hpp>
+#include <armnn/utility/Assert.hpp>
#include <list>
#include <map>
@@ -18,7 +19,6 @@
#include <unordered_set>
#include <vector>
-#include <boost/assert.hpp>
#include <boost/iterator/transform_iterator.hpp>
namespace armnn
@@ -115,8 +115,8 @@ public:
otherLayer->Reparent(*this, m_Layers.end());
});
- BOOST_ASSERT(other.m_PosInGraphMap.empty());
- BOOST_ASSERT(other.m_Layers.empty());
+ ARMNN_ASSERT(other.m_PosInGraphMap.empty());
+ ARMNN_ASSERT(other.m_Layers.empty());
return *this;
}
@@ -298,7 +298,7 @@ private:
const size_t numErased = graph.m_PosInGraphMap.erase(this);
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
protected:
@@ -356,7 +356,7 @@ public:
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
};
@@ -382,14 +382,14 @@ public:
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
};
inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
{
auto it = m_PosInGraphMap.find(&layer);
- BOOST_ASSERT(it != m_PosInGraphMap.end());
+ ARMNN_ASSERT(it != m_PosInGraphMap.end());
return it->second;
}
@@ -429,7 +429,7 @@ inline LayerT* Graph::InsertNewLayer(OutputSlot& insertAfter, Args&&... args)
const Iterator pos = std::next(GetPosInGraph(owningLayer));
LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
- BOOST_ASSERT(layer->GetNumInputSlots() == 1);
+ ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
insertAfter.MoveAllConnections(layer->GetOutputSlot());
insertAfter.Connect(layer->GetInputSlot(0));
@@ -449,7 +449,7 @@ inline void Graph::EraseLayer(Iterator pos)
template <typename LayerT>
inline void Graph::EraseLayer(LayerT*& layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
EraseLayer(GetPosInGraph(*layer));
layer = nullptr;
}
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index 2fe38fc963..a9435b29f5 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -5,7 +5,7 @@
#include "InternalTypes.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -75,7 +75,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
case LayerType::Transpose: return "Transpose";
default:
- BOOST_ASSERT_MSG(false, "Unknown layer type");
+ ARMNN_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
}
}
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 29d85b5a4c..024a18862d 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -19,7 +19,7 @@ namespace armnn
void InputSlot::Insert(Layer& layer)
{
- BOOST_ASSERT(layer.GetNumOutputSlots() == 1);
+ ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
OutputSlot* const prevSlot = GetConnectedOutputSlot();
@@ -29,7 +29,7 @@ void InputSlot::Insert(Layer& layer)
prevSlot->Disconnect(*this);
// Connects inserted layer to parent.
- BOOST_ASSERT(layer.GetNumInputSlots() == 1);
+ ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
int idx = prevSlot->Connect(layer.GetInputSlot(0));
prevSlot->SetEdgeStrategy(boost::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
@@ -72,7 +72,7 @@ bool OutputSlot::IsTensorInfoSet() const
bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
- BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
+ ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
return shape == m_OutputHandler.GetTensorInfo().GetShape();
}
@@ -113,7 +113,7 @@ void OutputSlot::MoveAllConnections(OutputSlot& destination)
{
while (GetNumConnections() > 0)
{
- BOOST_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
+ ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
"Cannot move connections once memory strategies have be established.");
InputSlot& connection = *GetConnection(0);
@@ -131,7 +131,7 @@ unsigned int OutputSlot::CalculateIndexOnOwner() const
return i;
}
}
- BOOST_ASSERT_MSG(false, "Did not find slot on owner.");
+ ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
return 0; // Error
}
@@ -223,7 +223,7 @@ void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
for (auto&& inputSlot : GetInputSlots())
{
// The graph must be well-formed at this point.
- BOOST_ASSERT(inputSlot.GetConnection());
+ ARMNN_ASSERT(inputSlot.GetConnection());
const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
}
@@ -255,7 +255,7 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
}
}
@@ -337,7 +337,7 @@ LayerPriority Layer::GetPriority() const
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
- BOOST_ASSERT(GetNumInputSlots() == expectedConnections);
+ ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
for (unsigned int i=0; i<expectedConnections; ++i)
{
@@ -370,8 +370,8 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(GetNumInputSlots() != 0);
- BOOST_ASSERT(GetNumOutputSlots() != 0);
+ ARMNN_ASSERT(GetNumInputSlots() != 0);
+ ARMNN_ASSERT(GetNumOutputSlots() != 0);
// By default we return what we got, meaning the output shape(s) are the same as the input(s).
// This only works if the number of inputs and outputs are the same. Since we are in the Layer
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 73e54b3006..fe5b542867 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -10,7 +10,7 @@
#include <armnn/backends/IBackendInternal.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
#include <algorithm>
@@ -144,7 +144,7 @@ bool IsConcatSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- BOOST_ASSERT(inputs.size() > 0);
+ ARMNN_ASSERT(inputs.size() > 0);
FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
}
@@ -418,7 +418,7 @@ bool IsMergerSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- BOOST_ASSERT(inputs.size() > 0);
+ ARMNN_ASSERT(inputs.size() > 0);
ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 9d181e535a..9da988b9e5 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -13,6 +13,7 @@
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IMemoryManager.hpp>
@@ -22,7 +23,6 @@
#include <LabelsAndEventClasses.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
namespace armnn
@@ -55,7 +55,7 @@ void AddLayerStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
for (auto&& input : layer.GetInputSlots())
{
const IOutputSlot* source = input.GetConnectedOutputSlot();
- BOOST_ASSERT(source != NULL);
+ ARMNN_ASSERT(source != NULL);
timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
source->GetOwningLayerGuid(),
layer.GetGuid());
@@ -304,7 +304,7 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
{
for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
{
- BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
if (inputLayer->GetBindingId() == layerId)
{
return inputLayer->GetOutputSlot(0).GetTensorInfo();
@@ -318,8 +318,8 @@ TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
{
for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
{
- BOOST_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
- BOOST_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
+ ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
+ ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
if (outputLayer->GetBindingId() == layerId)
{
return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -346,10 +346,10 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
workloadFactory = it->second.first.get();
- BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
+ ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
std::string reasonIfUnsupported;
- BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
+ ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
"Factory does not support layer");
IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
@@ -540,11 +540,11 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
info.m_InputTensorInfos.push_back(tensorInfo);
- BOOST_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
+ ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
const OutputHandler& handler = layer.GetOutputHandler();
const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
ITensorHandle* outputTensorHandle = handler.GetData();
- BOOST_ASSERT_MSG(outputTensorHandle != nullptr,
+ ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
"Data should have been allocated.");
inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
info.m_OutputTensorInfos.push_back(outputTensorInfo);
@@ -574,7 +574,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
// Create a mem copy workload for input since we did not import
std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
- BOOST_ASSERT_MSG(inputWorkload, "No input workload created");
+ ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
@@ -607,14 +607,14 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
info.m_OutputTensorInfos.push_back(tensorInfo);
- BOOST_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
+ ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
// Gets the output handler from the previous node.
const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
ITensorHandle* inputTensorHandle = outputHandler.GetData();
- BOOST_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+ ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
// Try import the output tensor.
// Note: We can only import the output pointer if all of the following hold true:
@@ -641,7 +641,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
syncDesc.m_Inputs.push_back(inputTensorHandle);
info.m_InputTensorInfos.push_back(inputTensorInfo);
auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
- BOOST_ASSERT_MSG(syncWorkload, "No sync workload created");
+ ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
m_OutputQueue.push_back(move(syncWorkload));
}
else
@@ -667,7 +667,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
std::unique_ptr<IWorkload> outputWorkload =
std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
- BOOST_ASSERT_MSG(outputWorkload, "No output workload created");
+ ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index ba401233ae..a3ca7ce118 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -6,6 +6,7 @@
#include <armnn/Logging.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
#if defined(_MSC_VER)
#ifndef NOMINMAX
@@ -19,7 +20,6 @@
#include <android/log.h>
#endif
-#include <boost/assert.hpp>
#include <iostream>
namespace armnn
@@ -54,7 +54,7 @@ void SetLogFilter(LogSeverity level)
SimpleLogger<LogSeverity::Fatal>::Get().Enable(true);
break;
default:
- BOOST_ASSERT(false);
+ ARMNN_ASSERT(false);
}
}
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index a443721a45..ac5159a855 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -22,6 +22,7 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ProfilingService.hpp>
@@ -33,7 +34,6 @@
#include <vector>
#include <algorithm>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>
@@ -473,7 +473,7 @@ OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
}
else
{
- BOOST_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
+ ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
}
}
}
@@ -527,7 +527,7 @@ BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRe
{
auto backendFactory = backendRegistry.GetFactory(selectedBackend);
auto backendObjPtr = backendFactory();
- BOOST_ASSERT(backendObjPtr);
+ ARMNN_ASSERT(backendObjPtr);
backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
@@ -542,7 +542,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
BackendsMap& backends,
Optional<std::vector<std::string>&> errMessages)
{
- BOOST_ASSERT(optNetObjPtr);
+ ARMNN_ASSERT(optNetObjPtr);
OptimizationResult result;
@@ -553,7 +553,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
{
auto backendObjPtr = backends.find(selectedBackend)->second.get();
- BOOST_ASSERT(backendObjPtr);
+ ARMNN_ASSERT(backendObjPtr);
// Select sub-graphs based on backend
SubgraphViewSelector::Subgraphs subgraphs =
@@ -576,7 +576,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
{
// Try to optimize the current sub-graph
OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
- BOOST_ASSERT(optimizationViews.Validate(*subgraph));
+ ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
// Optimization attempted, check the resulting optimized sub-graph
for (auto& substitution : optimizationViews.GetSubstitutions())
@@ -589,7 +589,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
// Assign the current backend to the optimized sub-graph
std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
{
- BOOST_ASSERT(l);
+ ARMNN_ASSERT(l);
l->SetBackendId(selectedBackend);
});
}
@@ -660,7 +660,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
TensorHandleFactoryRegistry& registry)
{
Layer& layer = slot.GetOwningLayer();
- BOOST_ASSERT(layer.GetType() == LayerType::Input);
+ ARMNN_ASSERT(layer.GetType() == LayerType::Input);
// Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
// doesn't matter which backend it is assigned to because they all use the same implementation, which
@@ -686,7 +686,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
{
@@ -802,7 +802,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
for (auto&& src : srcPrefs)
@@ -863,7 +863,7 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
TensorHandleFactoryRegistry& registry)
{
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
@@ -942,11 +942,11 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
{
- BOOST_ASSERT(layer);
+ ARMNN_ASSERT(layer);
// Lets make sure the backend is in our list of supported backends. Something went wrong during backend
// assignment if this check fails
- BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
+ ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
// Check each output separately
for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
@@ -1132,7 +1132,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
{
auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
auto backendPtr = factoryFun();
- BOOST_ASSERT(backendPtr.get() != nullptr);
+ ARMNN_ASSERT(backendPtr.get() != nullptr);
ARMNN_NO_DEPRECATE_WARN_BEGIN
auto backendSpecificOptimizations = backendPtr->GetOptimizations();
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index 75473b4ae6..dd0affde25 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -33,7 +33,7 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
}
break;
default:
- BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
+ ARMNN_ASSERT_MSG(false, "Can't quantize unsupported data type");
}
TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
diff --git a/src/armnn/NetworkQuantizerUtils.hpp b/src/armnn/NetworkQuantizerUtils.hpp
index 303a118a4e..dd274f9e35 100644
--- a/src/armnn/NetworkQuantizerUtils.hpp
+++ b/src/armnn/NetworkQuantizerUtils.hpp
@@ -10,20 +10,19 @@
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/ILayerVisitor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <utility>
#include <limits>
-#include <boost/assert.hpp>
-
namespace armnn
{
template<typename srcType>
void QuantizeConstant(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
{
- BOOST_ASSERT(src);
- BOOST_ASSERT(dst);
+ ARMNN_ASSERT(src);
+ ARMNN_ASSERT(dst);
float min = std::numeric_limits<srcType>::max();
float max = std::numeric_limits<srcType>::lowest();
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index 0549a115d4..285da4c9a9 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -245,7 +245,7 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer)
graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str());
// Sets output tensor info for the debug layer.
- BOOST_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
+ ARMNN_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
diff --git a/src/armnn/Optimizer.cpp b/src/armnn/Optimizer.cpp
index 0a31f84654..cfb069333b 100644
--- a/src/armnn/Optimizer.cpp
+++ b/src/armnn/Optimizer.cpp
@@ -28,7 +28,7 @@ void Optimizer::Pass(Graph& graph, const Optimizations& optimizations)
--it;
for (auto&& optimization : optimizations)
{
- BOOST_ASSERT(*it);
+ ARMNN_ASSERT(*it);
optimization->Run(graph, **it);
if ((*it)->IsOutputUnconnected())
diff --git a/src/armnn/OutputHandler.cpp b/src/armnn/OutputHandler.cpp
index 5a542fdb2e..973d23b28e 100644
--- a/src/armnn/OutputHandler.cpp
+++ b/src/armnn/OutputHandler.cpp
@@ -9,8 +9,6 @@
#include <backendsCommon/WorkloadDataCollector.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/armnn/OutputHandler.hpp b/src/armnn/OutputHandler.hpp
index 9cfde20c12..352520a000 100644
--- a/src/armnn/OutputHandler.hpp
+++ b/src/armnn/OutputHandler.hpp
@@ -17,8 +17,6 @@
#include <string>
#include <vector>
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index d0453fe326..6e5137b794 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -9,8 +9,6 @@
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index b1aedaab5a..7194064c11 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -5,6 +5,7 @@
#include "Profiling.hpp"
#include <armnn/BackendId.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include "JsonPrinter.hpp"
@@ -45,7 +46,7 @@ constexpr bool g_WriteReportToStdOutOnProfilerDestruction = false;
Measurement FindMeasurement(const std::string& name, const Event* event)
{
- BOOST_ASSERT(event != nullptr);
+ ARMNN_ASSERT(event != nullptr);
// Search though the measurements.
for (const auto& measurement : event->GetMeasurements())
@@ -63,7 +64,7 @@ Measurement FindMeasurement(const std::string& name, const Event* event)
std::vector<Measurement> FindKernelMeasurements(const Event* event)
{
- BOOST_ASSERT(event != nullptr);
+ ARMNN_ASSERT(event != nullptr);
std::vector<Measurement> measurements;
@@ -219,13 +220,13 @@ void Profiler::EndEvent(Event* event)
{
event->Stop();
- BOOST_ASSERT(!m_Parents.empty());
- BOOST_ASSERT(event == m_Parents.top());
+ ARMNN_ASSERT(!m_Parents.empty());
+ ARMNN_ASSERT(event == m_Parents.top());
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
IgnoreUnused(parent);
- BOOST_ASSERT(event->GetParentEvent() == parent);
+ ARMNN_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
ANNOTATE_CHANNEL_END(uint32_t(m_Parents.size()));
@@ -287,7 +288,7 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
JsonChildObject& parentObject,
std::map<const Event*, std::vector<const Event*>> descendantsMap)
{
- BOOST_ASSERT(parentEvent);
+ ARMNN_ASSERT(parentEvent);
std::vector<Measurement> instrumentMeasurements = parentEvent->GetMeasurements();
unsigned int childIdx=0;
for(size_t measurementIndex = 0; measurementIndex < instrumentMeasurements.size(); ++measurementIndex, ++childIdx)
@@ -299,7 +300,7 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
measurementObject.SetType(JsonObjectType::Measurement);
- BOOST_ASSERT(parentObject.NumChildren() == childIdx);
+ ARMNN_ASSERT(parentObject.NumChildren() == childIdx);
parentObject.AddChild(measurementObject);
}
diff --git a/src/armnn/QuantizerVisitor.cpp b/src/armnn/QuantizerVisitor.cpp
index 8e7c45f47f..16e8a602f8 100644
--- a/src/armnn/QuantizerVisitor.cpp
+++ b/src/armnn/QuantizerVisitor.cpp
@@ -24,15 +24,15 @@ QuantizerVisitor::QuantizerVisitor(const RangeTracker& rangeTracker,
void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
IConnectableLayer* quantizedLayer)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
{
const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
const InputSlot* inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
- BOOST_ASSERT(inputSlot);
+ ARMNN_ASSERT(inputSlot);
const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
- BOOST_ASSERT(outputSlot);
+ ARMNN_ASSERT(outputSlot);
unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
Layer& layerToFind = outputSlot->GetOwningLayer();
@@ -40,7 +40,7 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src
if (found == m_OriginalToQuantizedGuidMap.end())
{
// Error in graph traversal order
- BOOST_ASSERT_MSG(false, "Error in graph traversal");
+ ARMNN_ASSERT_MSG(false, "Error in graph traversal");
return;
}
@@ -68,13 +68,13 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa
const Optional<ConstTensor>& biases,
std::vector<int32_t>& backing)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
auto inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
- BOOST_ASSERT(inputSlot);
+ ARMNN_ASSERT(inputSlot);
const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
- BOOST_ASSERT(outputSlot);
+ ARMNN_ASSERT(outputSlot);
unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
Layer& layerToFind = outputSlot->GetOwningLayer();
@@ -82,7 +82,7 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa
if (found == m_OriginalToQuantizedGuidMap.end())
{
// Error in graph traversal order
- BOOST_ASSERT_MSG(false, "Error in graph traversal");
+ ARMNN_ASSERT_MSG(false, "Error in graph traversal");
return biases.value();
}
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index dfcbf852e0..f44606c762 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -192,7 +192,7 @@ Runtime::Runtime(const CreationOptions& options)
try {
auto factoryFun = BackendRegistryInstance().GetFactory(id);
auto backend = factoryFun();
- BOOST_ASSERT(backend.get() != nullptr);
+ ARMNN_ASSERT(backend.get() != nullptr);
auto context = backend->CreateBackendContext(options);
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index 7705e687a9..446485f415 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -28,10 +28,10 @@ void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessa
IgnoreUnused(errorMessage);
// Check if the item is valid
- BOOST_ASSERT_MSG(i, errorMessage.c_str());
+ ARMNN_ASSERT_MSG(i, errorMessage.c_str());
// Check if a duplicate has been found
- BOOST_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+ ARMNN_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
duplicateSet.insert(i);
});
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 02b7bdafa5..fa2fad9d4e 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -6,9 +6,9 @@
#include "SubgraphViewSelector.hpp"
#include "Graph.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <algorithm>
#include <map>
#include <queue>
@@ -80,14 +80,14 @@ public:
for (PartialSubgraph* a : m_Antecedents)
{
size_t numErased = a->m_Dependants.erase(this);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
IgnoreUnused(numErased);
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
@@ -197,7 +197,7 @@ struct LayerSelectionInfo
for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
- BOOST_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
+ ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
if (parentLayerOutputSlot)
{
Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
@@ -268,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
for (auto inputSlot : layer.GetInputSlots())
{
auto connectedInput = boost::polymorphic_downcast<OutputSlot*>(inputSlot.GetConnection());
- BOOST_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
+ ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
Layer& inputLayer = connectedInput->GetOwningLayer();
auto parentInfo = layerInfos.find(&inputLayer);
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index aeb7ab5fdd..4dc6f0dc34 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -8,7 +8,8 @@
#include "armnn/Exceptions.hpp"
#include "armnn/TypesUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <sstream>
@@ -252,7 +253,7 @@ float TensorInfo::GetQuantizationScale() const
return 1.0f;
}
- BOOST_ASSERT(!HasMultipleQuantizationScales());
+ ARMNN_ASSERT(!HasMultipleQuantizationScales());
return m_Quantization.m_Scales[0];
}
diff --git a/src/armnn/TypesUtils.cpp b/src/armnn/TypesUtils.cpp
index f4f857f67a..9e58dc8f29 100644
--- a/src/armnn/TypesUtils.cpp
+++ b/src/armnn/TypesUtils.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
@@ -33,8 +33,8 @@ QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
+ ARMNN_ASSERT(scale != 0.f);
+ ARMNN_ASSERT(!std::isnan(value));
float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
static_cast<float>(max));
@@ -47,8 +47,8 @@ template <typename QuantizedType>
float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
{
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!IsNan(value));
+ ARMNN_ASSERT(scale != 0.f);
+ ARMNN_ASSERT(!IsNan(value));
float dequantized = boost::numeric_cast<float>(value - offset) * scale;
return dequantized;
}
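
[Note] Worked numbers for the quantize/dequantize round trip whose preconditions these asserts check; the scale, offset, and value are illustrative:

    // q = clamp(round(value / scale) + offset, min, max);  v = (q - offset) * scale
    // With scale = 0.5f, offset = 10, value = 2.7f (QAsymmU8):
    //   round(2.7 / 0.5) + 10 = round(5.4) + 10 = 15
    //   (15 - 10) * 0.5       = 2.5f   (2.7 snapped onto the quantization grid)
    // The asserts guard the divisor (scale != 0) and reject NaN inputs, for
    // which round/clamp would be meaningless.
    uint8_t q = armnn::Quantize<uint8_t>(2.7f, 0.5f, 10);    // == 15
    float   v = armnn::Dequantize<uint8_t>(q, 0.5f, 10);     // == 2.5f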
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index f67d965086..490b03ed79 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -36,7 +36,7 @@ void AbsLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"AbsLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 263fb72c20..d310b7efbc 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -34,7 +34,7 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index b67c42b2e4..a9907871be 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -34,7 +34,7 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
auto inputNumDimensions = inputShape.GetNumDimensions();
@@ -42,7 +42,7 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten
auto axis = m_Param.m_Axis;
auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
- BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+ ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
// 1D input shape results in scalar output
if (inputShape.GetNumDimensions() == 1)
@@ -75,7 +75,7 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index aed744714b..7f61cad40f 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -21,10 +21,10 @@ BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalization
std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
- BOOST_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
- BOOST_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
- BOOST_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
+ ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
+ ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
+ ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
+ ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
BatchNormalizationQueueDescriptor descriptor;
@@ -54,7 +54,7 @@ void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 7e7045291c..1da88c63ac 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -47,7 +47,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -56,7 +56,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -66,7 +66,7 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
1U,
std::multiplies<>());
- BOOST_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
+ ARMNN_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
outputShape[0] = inputShape[0] / accumulatedBlockShape;
@@ -80,10 +80,10 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
unsigned int outputHeight = inputShape[heightIndex] * m_Param.m_BlockShape[0];
unsigned int outputWidth = inputShape[widthIndex] * m_Param.m_BlockShape[1];
- BOOST_ASSERT_MSG(heightCrop <= outputHeight,
+ ARMNN_ASSERT_MSG(heightCrop <= outputHeight,
"BatchToSpaceLayer: Overall height crop should be less than or equal to the uncropped output height.");
- BOOST_ASSERT_MSG(widthCrop <= outputWidth,
+ ARMNN_ASSERT_MSG(widthCrop <= outputWidth,
"BatchToSpaceLayer: Overall width crop should be less than or equal to the uncropped output width.");
outputShape[heightIndex] = outputHeight - heightCrop;
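
Note: the shape arithmetic in the two hunks above is easiest to verify with concrete numbers. A minimal, self-contained sketch with hypothetical values (NHWC layout assumed; the crop totals stand in for the summed m_Crops entries):

    #include <cstdio>

    int main()
    {
        // Hypothetical values: input NHWC = [8, 2, 2, 1], m_BlockShape = {2, 2},
        // total height crop = 1, total width crop = 0.
        unsigned int batch = 8, height = 2, width = 2;
        unsigned int blockH = 2, blockW = 2, heightCrop = 1, widthCrop = 0;

        unsigned int accumulatedBlockShape = blockH * blockW; // 4
        std::printf("output = [%u, %u, %u, 1]\n",
                    batch / accumulatedBlockShape,            // 8 / 4 = 2
                    height * blockH - heightCrop,             // 2 * 2 - 1 = 3
                    width * blockW - widthCrop);              // 2 * 2 - 0 = 4
    }

This prints output = [2, 3, 4, 1], consistent with the divisibility and crop asserts above.
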
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 1f6e35fa85..91080457bf 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -33,11 +33,11 @@ ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& input0 = inputShapes[0];
const TensorShape& input1 = inputShapes[1];
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
std::vector<unsigned int> dims(numDims);
@@ -46,7 +46,7 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te
unsigned int dim0 = input0[i];
unsigned int dim1 = input1[i];
- BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
"Dimensions should either match or one should be of size 1.");
dims[i] = std::max(dim0, dim1);
@@ -63,7 +63,7 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ComparisonLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index f4024af65a..5df5ec8de5 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -111,7 +111,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory)
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
+ ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
@@ -141,7 +141,7 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
CreateTensors(*handleFactory);
}
}
@@ -153,7 +153,7 @@ ConcatLayer* ConcatLayer::Clone(Graph& graph) const
std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
unsigned int numDims = m_Param.GetNumDimensions();
for (unsigned int i=0; i< inputShapes.size(); i++)
@@ -259,7 +259,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index 147aa8f46a..30d20b87d6 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -36,7 +36,7 @@ void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 7873c94563..08f0e4a8c1 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -36,7 +36,7 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index 936acf61ab..c9e0962dd5 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -36,7 +36,7 @@ void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index bbf4dbffd8..95403e9e75 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -35,7 +35,7 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 55a243aa0b..d82908a128 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -49,7 +49,7 @@ void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
Convolution2dQueueDescriptor descriptor;
@@ -57,7 +57,7 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFac
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
@@ -79,12 +79,12 @@ Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape filterShape = inputShapes[1];
// If we support multiple batch dimensions in the future, then this assert will need to change.
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -117,13 +117,13 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs()
VerifyLayerConnections(1, CHECK_LOCATION());
// check that m_Weight data is not nullptr
- BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 76d33f27e9..6aaf945878 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -41,7 +41,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DebugLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index bb74232690..2d13271c77 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -38,7 +38,7 @@ DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -64,7 +64,7 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DepthToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index f37096ac18..dc6b2c2fe7 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -51,7 +51,7 @@ void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFun
std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
DepthwiseConvolution2dQueueDescriptor descriptor;
@@ -59,7 +59,7 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
@@ -81,11 +81,11 @@ DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) co
std::vector<TensorShape>
DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& filterShape = inputShapes[1];
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -124,14 +124,14 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
VerifyLayerConnections(1, CHECK_LOCATION());
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DepthwiseConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 00a1d697b6..5b57279c43 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -36,7 +36,7 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 8749b33ba2..e8d14d928c 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -39,9 +39,9 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
VerifyLayerConnections(2, CHECK_LOCATION());
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+ ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
- BOOST_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+ ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 761814176d..2c1e8717f4 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -8,8 +8,7 @@
#include "InternalTypes.hpp"
#include "armnn/Exceptions.hpp"
#include <armnn/TypesUtils.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -22,12 +21,12 @@ ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned
std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
auto& input0 = inputShapes[0];
auto& input1 = inputShapes[1];
// Get the max of the inputs.
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
std::vector<unsigned int> dims(numDims);
@@ -38,7 +37,7 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
#if !NDEBUG
// Validate inputs are broadcast compatible.
- BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
"Dimensions should either match or one should be of size 1.");
#endif
@@ -57,7 +56,7 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
std::string msg = GetLayerTypeAsCString(GetType());
msg += "Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.";
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index d3843da060..c91057cc9f 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -34,7 +34,7 @@ ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
// Should return the shape of the input tensor
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& input = inputShapes[0];
return std::vector<TensorShape>({ input });
@@ -46,7 +46,7 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 8611b9b73c..2b4ad8605f 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -35,7 +35,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 148543cf62..fb918f6e7a 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -35,7 +35,7 @@ void FloorLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 6b36bad713..4bbc9ba890 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -22,14 +22,14 @@ FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param,
std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
FullyConnectedQueueDescriptor descriptor;
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
@@ -50,7 +50,7 @@ FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape weightShape = inputShapes[1];
@@ -66,13 +66,13 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs()
VerifyLayerConnections(1, CHECK_LOCATION());
// check that m_Weight data is not nullptr
- BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 9e0212f226..25b133acda 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -35,7 +35,7 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"InstanceNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 3d9dc538f5..e6d5f064f3 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -35,7 +35,7 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 24b6fde339..627aa4cdd3 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -34,7 +34,7 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"LogSoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 1d945690d5..653b18a1c9 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -147,7 +147,7 @@ LstmLayer* LstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
@@ -173,35 +173,35 @@ void LstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()}
);
- BOOST_ASSERT(inferredShapes.size() == 4);
+ ARMNN_ASSERT(inferredShapes.size() == 4);
// Check if the weights are nullptr
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
"LstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
"LstmLayer: m_BasicParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
"LstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
"LstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
"LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -211,11 +211,11 @@ void LstmLayer::ValidateTensorShapesFromInputs()
}
else
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
"LstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
"LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -226,7 +226,7 @@ void LstmLayer::ValidateTensorShapesFromInputs()
if (m_Param.m_ProjectionEnabled)
{
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
"LstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
}
@@ -234,13 +234,13 @@ void LstmLayer::ValidateTensorShapesFromInputs()
{
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
"when Peephole is enabled and CIFG is disabled.");
}
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
@@ -261,14 +261,14 @@ void LstmLayer::ValidateTensorShapesFromInputs()
{
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
}
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
}
}
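
Note: the CIFG branches above assert a paired invariant: the three input-gate tensors must all be set when CIFG is disabled and all be unset when it is enabled. A sketch of the invariant as a standalone predicate; the function and parameter names are hypothetical, not part of the layer:

    // Sketch: the input-gate parameter set is all-present or all-absent,
    // depending on whether CIFG is enabled.
    bool CifgParametersConsistent(bool cifgEnabled,
                                  const void* inputToInputWeights,
                                  const void* recurrentToInputWeights,
                                  const void* inputGateBias)
    {
        const bool allPresent = inputToInputWeights && recurrentToInputWeights && inputGateBias;
        const bool allAbsent  = !inputToInputWeights && !recurrentToInputWeights && !inputGateBias;
        return cifgEnabled ? allAbsent : allPresent;
    }
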
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 30b88fa1b9..5fa88f9398 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -44,7 +44,7 @@ void MeanLayer::ValidateTensorShapesFromInputs()
const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
- BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
+ ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
"MeanLayer: Mean supports up to 4D input.");
unsigned int rank = input.GetNumDimensions();
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index cf69c17cf5..e4009de022 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -39,7 +39,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 80f9fda803..bcccba1f4a 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -39,7 +39,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MemImportLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index f2fd29fe9e..ad7d8b1416 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -36,7 +36,7 @@ void MergeLayer::ValidateTensorShapesFromInputs()
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -46,7 +46,7 @@ void MergeLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MergeLayer: TensorShapes set on inputs do not match",
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 09f8a0d00e..44179fd534 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -35,7 +35,7 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 0fc3ce4bf6..e565b48b57 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -35,7 +35,7 @@ PermuteLayer* PermuteLayer::Clone(Graph& graph) const
std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
@@ -46,7 +46,7 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index a3c2425097..ad2c82f761 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -37,12 +37,12 @@ Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
// If we support multiple batch dimensions in the future, then this assert will need to change.
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -54,7 +54,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
unsigned int outHeight = 1;
if (!isGlobalPooling)
{
- BOOST_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
+ ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
"Stride can only be zero when performing global pooling");
auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
@@ -72,7 +72,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
size = static_cast<unsigned int>(floor(div)) + 1;
break;
default:
- BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+ ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
}
// Makes sure that border operations will start from inside the input and not the padded area.
@@ -106,7 +106,7 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index d9e59224a0..609480673b 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -34,7 +34,7 @@ PreluLayer* PreluLayer::Clone(Graph& graph) const
std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& alphaShape = inputShapes[1];
@@ -42,8 +42,8 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
- BOOST_ASSERT(inputShapeDimensions > 0);
- BOOST_ASSERT(alphaShapeDimensions > 0);
+ ARMNN_ASSERT(inputShapeDimensions > 0);
+ ARMNN_ASSERT(alphaShapeDimensions > 0);
// The size of the output is the maximum size along each dimension of the input operands,
// it starts with the trailing dimensions, and works its way forward
@@ -63,7 +63,7 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
// Check that the inputs are broadcast compatible
- BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+ ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
"PreluLayer: Dimensions should either match or one should be of size 1");
outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
@@ -104,7 +104,7 @@ void PreluLayer::ValidateTensorShapesFromInputs()
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index 393a7029aa..9b940c1823 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -150,7 +150,7 @@ QLstmLayer* QLstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
@@ -176,35 +176,35 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn
});
- BOOST_ASSERT(inferredShapes.size() == 3);
+ ARMNN_ASSERT(inferredShapes.size() == 3);
// Check if the weights are nullptr for basic params
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
"QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
"QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
"QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
"QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
"QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -214,12 +214,12 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
}
else
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
"QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
"QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
"not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -230,23 +230,23 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
if (m_Param.m_ProjectionEnabled)
{
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
"QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
"QLstmLayer: m_ProjectionParameters.m_ProjectionBias should not be null.");
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
"when Peephole is enabled and CIFG is disabled.");
}
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
@@ -263,14 +263,14 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
{
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
}
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_UutputLayerNormWeights should not be null.");
}
}
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 8717041a53..b56ae3ff52 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -78,7 +78,7 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int numBatches = inputShapes[0][0];
@@ -102,34 +102,34 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousOutputIn
});
- BOOST_ASSERT(inferredShapes.size() == 2);
+ ARMNN_ASSERT(inferredShapes.size() == 2);
// Check weights and bias for nullptr
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
// Check output TensorShape(s) match inferred shape
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index fbf3eaa80a..b496dbb642 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -42,7 +42,7 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index e341191de1..9654e58b43 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -36,7 +36,7 @@ ResizeLayer* ResizeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
@@ -59,7 +59,7 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 6ff7372aa7..dfd466dca3 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -36,7 +36,7 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index ec82082c4a..d92ed6fc48 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -12,7 +12,6 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -40,7 +39,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -51,7 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
IgnoreUnused(inputShapes);
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index cb70bbc20d..738347c1b3 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -35,7 +35,7 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index ec724bafd0..ce48b5b5c2 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -41,7 +41,7 @@ SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -73,7 +73,7 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 8aa0c9f8cd..bf65240e0c 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -41,7 +41,7 @@ SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
@@ -66,7 +66,7 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index f655e712c8..8ec8121495 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -115,7 +115,7 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
CreateTensors(*handleFactory);
}
}
@@ -128,7 +128,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
IgnoreUnused(inputShapes);
- BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
@@ -150,7 +150,7 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(views);
- BOOST_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
{
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 6f793caecc..e034cb46a6 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -38,7 +38,7 @@ std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorS
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
const unsigned int axis = m_Param.m_Axis;
- BOOST_ASSERT(axis <= inputNumDimensions);
+ ARMNN_ASSERT(axis <= inputNumDimensions);
std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
for (unsigned int i = 0; i < axis; ++i)
@@ -84,7 +84,7 @@ void StackLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index c31b9a4280..b100f7ab6b 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -45,7 +45,7 @@ StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
std::vector<unsigned int> outputShape;
@@ -86,7 +86,7 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 4cacda6318..c4b065a735 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -31,14 +31,14 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
- BOOST_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
+ ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
// Assuming first input is the Input and second input is the Constant
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 2);
+ ARMNN_ASSERT(inferredShapes.size() == 2);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index dca77b4c09..05941f7d78 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -26,14 +26,14 @@ TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolut
std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
TransposeConvolution2dQueueDescriptor descriptor;
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
@@ -57,11 +57,11 @@ TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) co
std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& kernelShape = inputShapes[1];
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
@@ -82,8 +82,8 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
unsigned int kernelElements = kernelShape[0] * kernelShape[dataLayoutIndex.GetChannelsIndex()];
unsigned int inputElements = batches * inputShape[dataLayoutIndex.GetChannelsIndex()];
- BOOST_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
- BOOST_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
+ ARMNN_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
+ ARMNN_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
unsigned int channels = kernelElements / inputElements;
@@ -98,13 +98,13 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
- BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 3c22b545b9..c058332c90 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -35,7 +35,7 @@ TransposeLayer* TransposeLayer::Clone(Graph& graph) const
std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
@@ -46,7 +46,7 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
diff --git a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
index b2a2ba43ed..e598deb977 100644
--- a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
+++ b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
@@ -21,8 +21,8 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Pad);
- BOOST_ASSERT(child.GetType() == LayerType::Convolution2d);
+ ARMNN_ASSERT(base.GetType() == LayerType::Pad);
+ ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d);
PadLayer* padLayer = boost::polymorphic_downcast<PadLayer*>(&base);
Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast<Convolution2dLayer*>(&child);
@@ -60,12 +60,12 @@ public:
newConv2dLayer.GetOutputHandler().SetTensorInfo(outInfo);
// Copy weights and bias to the new convolution layer
- BOOST_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
"FoldPadIntoConvolution2d: Weights data should not be null.");
newConv2dLayer.m_Weight = std::move(convolution2dLayer->m_Weight);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
"FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
newConv2dLayer.m_Bias = std::move(convolution2dLayer->m_Bias);
}
diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
index 53d4a3c4fd..39bfe6e936 100644
--- a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
+++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
@@ -21,8 +21,8 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Reshape);
- BOOST_ASSERT(child.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(base.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(child.GetType() == LayerType::Reshape);
OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index 3ea4a5b279..d479445ce3 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -24,7 +24,7 @@ public:
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
+ ARMNN_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
child.GetType() == LayerType::ConvertFp32ToFp16) ||
(base.GetType() == LayerType::ConvertFp32ToFp16 &&
child.GetType() == LayerType::ConvertFp16ToFp32));
diff --git a/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp b/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
index 21aed869f5..ea4de9df6f 100644
--- a/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
+++ b/src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
@@ -22,7 +22,7 @@ public:
{
// Validate base layer (the Permute) is compatible
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
+ ARMNN_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
const TensorInfo& inputInfo = base.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& intermediateInfo = base.GetOutputSlot(0).GetTensorInfo();
if (intermediateInfo.GetNumDimensions() != 4)
@@ -39,7 +39,7 @@ public:
// Validate child layer (the BatchToSpace) is compatible
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
+ ARMNN_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
const TensorInfo& outputInfo = child.GetOutputSlot(0).GetTensorInfo();
const BatchToSpaceNdDescriptor& batchToSpaceDesc = static_cast<BatchToSpaceNdLayer&>(child).GetParameters();
if (batchToSpaceDesc.m_DataLayout != DataLayout::NHWC)
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index a7b23dbd86..c7883ffdb8 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -203,8 +203,8 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
{
if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
}
}
@@ -223,18 +223,18 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
{
if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
}
else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
}
else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
}
}
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index ef9b2da782..ebdfbc5a40 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -336,7 +336,7 @@ TensorInfo GetInputTensorInfo(const Network* network)
{
for (auto&& inputLayer : network->GetGraph().GetInputLayers())
{
- BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
return inputLayer->GetOutputSlot(0).GetTensorInfo();
}
throw InvalidArgumentException("Network has no input layers");
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 3f8589353c..ca148edefb 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -5,10 +5,10 @@
#pragma once
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <QuantizeHelper.hpp>
-#include <boost/assert.hpp>
#include <boost/multi_array.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/random/uniform_real_distribution.hpp>
@@ -192,7 +192,7 @@ boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo)
template <typename T, std::size_t n>
boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat)
{
- BOOST_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
+ ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
std::array<unsigned int, n> shape;
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
index 8ef820b3d5..6d7d02dcff 100644
--- a/src/armnn/test/TestUtils.cpp
+++ b/src/armnn/test/TestUtils.cpp
@@ -5,15 +5,15 @@
#include "TestUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnn;
void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
unsigned int fromIndex, unsigned int toIndex)
{
- BOOST_ASSERT(from);
- BOOST_ASSERT(to);
+ ARMNN_ASSERT(from);
+ ARMNN_ASSERT(to);
from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index ce5c5bd4f5..b95d3bcc08 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -13,8 +13,9 @@
#include "GraphTopologicalSort.hpp"
#include "VerificationHelpers.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
// Caffe
@@ -363,7 +364,7 @@ vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& l
void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
{
- BOOST_ASSERT(layerParam.type() == "Input");
+ ARMNN_ASSERT(layerParam.type() == "Input");
ValidateNumInputsOutputs(layerParam, 0, 1);
const InputParameter& param = layerParam.input_param();
@@ -421,7 +422,7 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
unsigned int kernelW,
unsigned int kernelH)
{
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
@@ -429,8 +430,8 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
// assume these were already verified by the calling ParseConvLayer() function
- BOOST_ASSERT(numGroups < inputShape.dim(1));
- BOOST_ASSERT(numGroups > 1);
+ ARMNN_ASSERT(numGroups < inputShape.dim(1));
+ ARMNN_ASSERT(numGroups > 1);
// Handle grouping
armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
@@ -613,7 +614,7 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
unsigned int kernelW,
unsigned int kernelH)
{
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
@@ -711,7 +712,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
// Parameters not available in the ArmNN interface:
// * Rounding policy;
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 58232a2763..2975675ff1 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -13,6 +13,7 @@
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ParserHelper.hpp>
@@ -20,7 +21,6 @@
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -725,7 +725,7 @@ Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryCo
INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
{
m_Network = INetwork::Create();
- BOOST_ASSERT(graph != nullptr);
+ ARMNN_ASSERT(graph != nullptr);
unsigned int layerIndex = 0;
for (AnyLayer const* layer : *graph->layers())
{
@@ -883,7 +883,7 @@ void Deserializer::SetupInputLayers(GraphPtr graph)
// GetBindingLayerInfo expects the index to be the index in the vector, not the index property on each layer base
LayerBindingId bindingId = GetBindingLayerInfo(graph, inputLayerIndex);
- BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+ ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
IConnectableLayer* inputLayer =
m_Network->AddInputLayer(bindingId, baseLayer->layerName()->c_str());
@@ -922,7 +922,7 @@ void Deserializer::SetupOutputLayers(GraphPtr graph)
// GetBindingLayerInfo expects the index to be the index in the vector, not the index property on each layer base
LayerBindingId bindingId = GetBindingLayerInfo(graph, outputLayerIndex);
- BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+ ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Output has no name.");
IConnectableLayer* outputLayer =
m_Network->AddOutputLayer(bindingId, baseLayer->layerName()->c_str());
@@ -944,7 +944,7 @@ void Deserializer::RegisterOutputSlots(GraphPtr graph,
IConnectableLayer* layer)
{
CHECK_LAYERS(graph, 0, layerIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->outputSlots()->size() != layer->GetNumOutputSlots())
{
@@ -971,7 +971,7 @@ void Deserializer::RegisterInputSlots(GraphPtr graph,
armnn::IConnectableLayer* layer)
{
CHECK_LAYERS(graph, 0, layerIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->inputSlots()->size() != layer->GetNumInputSlots())
{
@@ -1845,7 +1845,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
+ ARMNN_ASSERT_MSG(false, "Unsupported pooling algorithm");
}
}
@@ -1863,7 +1863,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
+ ARMNN_ASSERT_MSG(false, "Unsupported output shape rounding");
}
}
@@ -1881,7 +1881,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported padding method");
+ ARMNN_ASSERT_MSG(false, "Unsupported padding method");
}
}
@@ -1899,7 +1899,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported data layout");
+ ARMNN_ASSERT_MSG(false, "Unsupported data layout");
}
}
@@ -2197,7 +2197,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported normalization channel type");
+ ARMNN_ASSERT_MSG(false, "Unsupported normalization channel type");
}
}
@@ -2215,7 +2215,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported normalization method type");
+ ARMNN_ASSERT_MSG(false, "Unsupported normalization method type");
}
}
@@ -2233,7 +2233,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported data layout");
+ ARMNN_ASSERT_MSG(false, "Unsupported data layout");
}
}
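A side note on the assert-false default branches above: under NDEBUG the assert compiles away, so an unhandled enumerator falls straight through and the descriptor keeps whatever value it already had. Condensed sketch of the idiom from GetPoolingDescriptor (surrounding cases elided):

    switch (pooling)
    {
        // ... handled enumerators assign into the descriptor and break ...
        default:
        {
            // Debug-only trap; in release builds execution simply continues.
            ARMNN_ASSERT_MSG(false, "Unsupported pooling algorithm");
        }
    }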
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index 91d07f304a..bb38d5f4b4 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -14,10 +14,10 @@
#include <ArmnnSchema_generated.h>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
@@ -96,10 +96,10 @@ struct ParserFlatbuffersSerializeFixture
flatbuffers::Parser parser;
bool ok = parser.Parse(schemafile.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
ok &= parser.Parse(m_JsonString.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
if (!ok)
{
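Note how the fixture keeps the Parse() calls outside the assert: assert conditions must stay side-effect free because they disappear under NDEBUG. Writing ARMNN_ASSERT_MSG(parser.Parse(...), ...) directly would silently skip the parse in release builds; capturing the result first, as the hunk above does, keeps behaviour identical in both build types:

    bool ok = parser.Parse(schemafile.c_str());
    ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");  // check only, no side effects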
diff --git a/src/armnnOnnxParser/OnnxParser.cpp b/src/armnnOnnxParser/OnnxParser.cpp
index e4259980ca..455bd873af 100644
--- a/src/armnnOnnxParser/OnnxParser.cpp
+++ b/src/armnnOnnxParser/OnnxParser.cpp
@@ -5,6 +5,7 @@
#include "OnnxParser.hpp"
#include <armnn/Descriptors.hpp>
+#include <armnn/utility/Assert.hpp>
#include <VerificationHelpers.hpp>
#include <boost/format.hpp>
@@ -388,7 +389,7 @@ std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> o
const IConnectableLayer* layer,
std::vector<TensorShape> inputShapes)
{
- BOOST_ASSERT(! outNames.empty());
+ ARMNN_ASSERT(! outNames.empty());
bool needCompute = std::any_of(outNames.begin(),
outNames.end(),
[this](std::string name)
@@ -401,7 +402,7 @@ std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> o
if(needCompute)
{
inferredShapes = layer->InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == outNames.size());
+ ARMNN_ASSERT(inferredShapes.size() == outNames.size());
}
for (uint i = 0; i < outNames.size(); ++i)
{
@@ -607,7 +608,7 @@ INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
void OnnxParser::LoadGraph()
{
- BOOST_ASSERT(m_Graph.get() != nullptr);
+ ARMNN_ASSERT(m_Graph.get() != nullptr);
//Fill m_TensorsInfo with the shapes and value of every tensor
SetupInfo(m_Graph->mutable_output());
@@ -851,7 +852,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
CreateConstTensor(weightName).first,
Optional<ConstTensor>(CreateConstTensor(biasName).first),
matmulNode.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
{m_TensorsInfo[inputName].m_info->GetShape(),
@@ -868,7 +869,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
CreateConstTensor(weightName).first,
EmptyOptional(),
matmulNode.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
{m_TensorsInfo[inputName].m_info->GetShape(),
@@ -932,7 +933,7 @@ void OnnxParser::ParseGlobalAveragePool(const onnx::NodeProto& node)
desc.m_PoolHeight = inputShape[2];
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1026,7 +1027,7 @@ void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescripto
}
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1048,7 +1049,7 @@ void OnnxParser::CreateReshapeLayer(const std::string& inputName,
reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// register the input connection slots for the layer; connections are made after all layers have been created
@@ -1121,7 +1122,7 @@ void OnnxParser::ParseActivation(const onnx::NodeProto& node, const armnn::Activ
}
IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1161,7 +1162,7 @@ void OnnxParser::ParseLeakyRelu(const onnx::NodeProto& node)
void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
{
- BOOST_ASSERT(node.op_type() == "Conv");
+ ARMNN_ASSERT(node.op_type() == "Conv");
DepthwiseConvolution2dDescriptor desc;
desc.m_PadLeft = convDesc.m_PadLeft;
@@ -1203,7 +1204,7 @@ void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, cons
EmptyOptional(),
node.name().c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1403,7 +1404,7 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
EmptyOptional(),
node.name().c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1494,7 +1495,7 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
auto input0 = *m_TensorsInfo[inputs.first].m_info;
auto input1 = *m_TensorsInfo[inputs.second].m_info;
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
for (unsigned int i = 0; i < numDims; i++)
@@ -1518,7 +1519,7 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[inputs.first].m_info->GetShape(),
@@ -1574,7 +1575,7 @@ void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
biasTensor.first,
scaleTensor.first,
node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1623,7 +1624,7 @@ void OnnxParser::SetupOutputLayers()
void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIds.size() != layer->GetNumInputSlots())
{
throw ParseException(
@@ -1650,7 +1651,7 @@ void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<
void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIds.size() != layer->GetNumOutputSlots())
{
throw ParseException(
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index a0c673a5ad..cbb10d71fa 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -9,6 +9,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
// armnnUtils:
@@ -22,7 +23,6 @@
#include <flatbuffers/flexbuffers.h>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem.hpp>
@@ -131,11 +131,11 @@ void CheckTensor(const TfLiteParser::ModelPtr & model,
{
// not checking model, because I assume CHECK_MODEL has already run
// and checked that. An assert would do.
- BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
+ ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
// the subgraph index should also have been checked by CHECK_MODEL, so
// I only add an assert here
- BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
+ ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
// the tensor index is the only one to check here
if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
@@ -435,8 +435,8 @@ CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
armnn::Optional<armnn::PermutationVector&> permutationVector)
{
IgnoreUnused(tensorPtr);
- BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
- BOOST_ASSERT_MSG(bufferPtr != nullptr,
+ ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
+ ARMNN_ASSERT_MSG(bufferPtr != nullptr,
boost::str(
boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
@@ -543,12 +543,12 @@ void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
IConnectableLayer *layer)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
- BOOST_ASSERT(operatorPtr->inputs.size() > 1);
+ ARMNN_ASSERT(operatorPtr->inputs.size() > 1);
uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
@@ -612,7 +612,7 @@ INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & b
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
m_Network = INetwork::Create();
- BOOST_ASSERT(m_Model.get() != nullptr);
+ ARMNN_ASSERT(m_Model.get() != nullptr);
bool failedToCreate = false;
std::stringstream errors;
@@ -710,8 +710,8 @@ void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
armnn::IOutputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+ ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+ ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
@@ -734,8 +734,8 @@ void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
armnn::IInputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+ ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+ ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
tensorSlots.inputSlots.push_back(slot);
@@ -878,7 +878,7 @@ void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -965,7 +965,7 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
EmptyOptional(),
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -994,7 +994,7 @@ void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1035,7 +1035,7 @@ void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1104,7 +1104,7 @@ void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex
EmptyOptional(),
layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1185,7 +1185,7 @@ void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorInd
auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1292,7 +1292,7 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
break;
default:
- BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
+ ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
}
Pooling2dDescriptor desc;
@@ -1324,7 +1324,7 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1798,7 +1798,7 @@ void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2125,7 +2125,7 @@ void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex
auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -2198,7 +2198,7 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
EmptyOptional(),
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
@@ -2305,7 +2305,7 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// The model does not specify the output shapes.
// The output shapes are calculated from the max_detection and max_classes_per_detection.
@@ -2362,7 +2362,7 @@ void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2504,7 +2504,7 @@ void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
- BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
+ ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
const unsigned int splitDim = axisData[0];
auto inputDimSize = inputTensorInfo.GetNumDimensions();
@@ -2764,7 +2764,7 @@ void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
const std::vector<unsigned int>& tensorIndexes)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIndexes.size() != layer->GetNumInputSlots())
{
throw ParseException(
@@ -2791,7 +2791,7 @@ void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
const std::vector<unsigned int>& tensorIndexes)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIndexes.size() != layer->GetNumOutputSlots())
{
throw ParseException(
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 797e11e403..56811b5b6d 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -11,6 +11,7 @@
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
@@ -19,7 +20,6 @@
#include <test/TensorHelpers.hpp>
#include <boost/filesystem.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include "flatbuffers/idl.h"
@@ -107,10 +107,10 @@ struct ParserFlatbuffersFixture
flatbuffers::Parser parser;
bool ok = parser.Parse(schemafile.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
ok &= parser.Parse(m_JsonString.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
if (!ok)
{
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index 39dee679fd..21392ace02 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -7,10 +7,10 @@
#include "../TfLiteParser.hpp"
#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/utility/Assert.hpp>
#include <layers/StandInLayer.hpp>
-#include <boost/assert.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/test/unit_test.hpp>
@@ -78,10 +78,10 @@ public:
, m_StandInLayerVerifier(inputInfos, outputInfos)
{
const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
- BOOST_ASSERT(numInputs > 0);
+ ARMNN_ASSERT(numInputs > 0);
const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
- BOOST_ASSERT(numOutputs > 0);
+ ARMNN_ASSERT(numOutputs > 0);
m_JsonString = R"(
{
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 793bd0e233..491a9648cd 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -468,7 +468,7 @@ public:
IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
{
- BOOST_ASSERT(m_Layer);
+ ARMNN_ASSERT(m_Layer);
// Assumes one-to-one mapping between Tf and armnn output slots.
unsigned int armnnOutputSlotIdx = tfOutputIndex;
if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
@@ -858,7 +858,7 @@ public:
virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
{
- BOOST_ASSERT(m_Representative);
+ ARMNN_ASSERT(m_Representative);
return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
}
@@ -892,12 +892,12 @@ public:
m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
m_TensorInfo(tensorInfo)
{
- BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
+ ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
}
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
}
@@ -1068,7 +1068,7 @@ struct InvokeParseFunction
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
IgnoreUnused(graphDef);
- BOOST_ASSERT(nodeDef.op() == "Const");
+ ARMNN_ASSERT(nodeDef.op() == "Const");
if (nodeDef.attr().count("value") == 0)
{
@@ -1467,7 +1467,7 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
- BOOST_ASSERT(nodeDef.op() == "ExpandDims");
+ ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
if (inputTensorInfo.GetNumDimensions() > 4) {
throw ParseException(
@@ -1679,10 +1679,10 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
- BOOST_ASSERT(inputs.size() == 2);
- BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
- BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
- BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
+ ARMNN_ASSERT(inputs.size() == 2);
+ ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
+ ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
+ ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
{
@@ -1744,7 +1744,7 @@ ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
{
- BOOST_ASSERT(outputOfLeakyRelu != nullptr);
+ ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
@@ -2091,7 +2091,7 @@ ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef
const auto desc = TransposeDescriptor(permutationVector);
auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
- BOOST_ASSERT(layer);
+ ARMNN_ASSERT(layer);
input0Slot->Connect(layer->GetInputSlot(0));
@@ -2462,7 +2462,7 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
- BOOST_ASSERT(nodeDef.op() == "Squeeze");
+ ARMNN_ASSERT(nodeDef.op() == "Squeeze");
tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
DataType type;
@@ -2598,7 +2598,7 @@ public:
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
}
};
@@ -2681,7 +2681,7 @@ public:
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
}
};
@@ -3393,7 +3393,7 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
}
layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
diff --git a/src/armnnTfParser/test/AddN.cpp b/src/armnnTfParser/test/AddN.cpp
index 19affa85c7..16b1124e24 100644
--- a/src/armnnTfParser/test/AddN.cpp
+++ b/src/armnnTfParser/test/AddN.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/test/unit_test.hpp>
#include "armnnTfParser/ITfParser.hpp"
@@ -19,7 +19,7 @@ struct AddNFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITf
{
AddNFixture(const std::vector<armnn::TensorShape> inputShapes, unsigned int numberOfInputs)
{
- BOOST_ASSERT(inputShapes.size() == numberOfInputs);
+ ARMNN_ASSERT(inputShapes.size() == numberOfInputs);
m_Prototext = "";
for (unsigned int i = 0; i < numberOfInputs; i++)
{
diff --git a/src/armnnTfParser/test/Convolution2d.cpp b/src/armnnTfParser/test/Convolution2d.cpp
index aead1fe965..cf714894a2 100644
--- a/src/armnnTfParser/test/Convolution2d.cpp
+++ b/src/armnnTfParser/test/Convolution2d.cpp
@@ -152,7 +152,7 @@ struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfPa
"} \n");
// Manual height computation based on stride parameter.
- BOOST_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
+ ARMNN_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
std::array<unsigned int, 4> dims;
if (dataLayout == "NHWC")
{
diff --git a/src/armnnUtils/DotSerializer.cpp b/src/armnnUtils/DotSerializer.cpp
index 7416ff6bdf..80043a9f90 100644
--- a/src/armnnUtils/DotSerializer.cpp
+++ b/src/armnnUtils/DotSerializer.cpp
@@ -5,7 +5,6 @@
#include "DotSerializer.hpp"
-#include <boost/assert.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <sstream>
#include <cstring>
diff --git a/src/armnnUtils/FloatingPointConverter.cpp b/src/armnnUtils/FloatingPointConverter.cpp
index 2216824205..5d89a25cd5 100644
--- a/src/armnnUtils/FloatingPointConverter.cpp
+++ b/src/armnnUtils/FloatingPointConverter.cpp
@@ -8,7 +8,7 @@
#include "BFloat16.hpp"
#include "Half.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnnUtils
{
@@ -17,8 +17,8 @@ void FloatingPointConverter::ConvertFloat32To16(const float* srcFloat32Buffer,
size_t numElements,
void* dstFloat16Buffer)
{
- BOOST_ASSERT(srcFloat32Buffer != nullptr);
- BOOST_ASSERT(dstFloat16Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat16Buffer != nullptr);
armnn::Half* pHalf = reinterpret_cast<armnn::Half*>(dstFloat16Buffer);
@@ -32,8 +32,8 @@ void FloatingPointConverter::ConvertFloat16To32(const void* srcFloat16Buffer,
size_t numElements,
float* dstFloat32Buffer)
{
- BOOST_ASSERT(srcFloat16Buffer != nullptr);
- BOOST_ASSERT(dstFloat32Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat16Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat32Buffer != nullptr);
const armnn::Half* pHalf = reinterpret_cast<const armnn::Half*>(srcFloat16Buffer);
@@ -47,8 +47,8 @@ void FloatingPointConverter::ConvertFloat32ToBFloat16(const float* srcFloat32Buf
size_t numElements,
void* dstBFloat16Buffer)
{
- BOOST_ASSERT(srcFloat32Buffer != nullptr);
- BOOST_ASSERT(dstBFloat16Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+ ARMNN_ASSERT(dstBFloat16Buffer != nullptr);
armnn::BFloat16* bf16 = reinterpret_cast<armnn::BFloat16*>(dstBFloat16Buffer);
@@ -62,8 +62,8 @@ void FloatingPointConverter::ConvertBFloat16ToFloat32(const void* srcBFloat16Buf
size_t numElements,
float* dstFloat32Buffer)
{
- BOOST_ASSERT(srcBFloat16Buffer != nullptr);
- BOOST_ASSERT(dstFloat32Buffer != nullptr);
+ ARMNN_ASSERT(srcBFloat16Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat32Buffer != nullptr);
const armnn::BFloat16* bf16 = reinterpret_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
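A minimal usage sketch of the converters guarded above; the include path and the uint16_t stand-in for armnn::Half are assumptions for the example:

    #include <armnnUtils/FloatingPointConverter.hpp>  // assumed public header path
    #include <cstdint>
    #include <vector>

    std::vector<float> src = { 1.0f, 0.5f, -2.0f };
    std::vector<uint16_t> dst(src.size());  // armnn::Half is 16 bits wide
    // Null buffers now trip ARMNN_ASSERT in debug builds (previously BOOST_ASSERT).
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(src.data(), src.size(), dst.data());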
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
index 11314590a0..f3c4b191f8 100644
--- a/src/armnnUtils/GraphTopologicalSort.hpp
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -5,7 +5,6 @@
#pragma once
#include <armnn/Optional.hpp>
-#include <boost/assert.hpp>
#include <functional>
#include <map>
diff --git a/src/armnnUtils/ModelAccuracyChecker.cpp b/src/armnnUtils/ModelAccuracyChecker.cpp
index 818cb17a65..d197dc86ba 100644
--- a/src/armnnUtils/ModelAccuracyChecker.cpp
+++ b/src/armnnUtils/ModelAccuracyChecker.cpp
@@ -64,7 +64,7 @@ std::vector<std::string>
// Remove any preceding and trailing character specified in the characterSet.
std::string Strip(const std::string& originalString, const std::string& characterSet)
{
- BOOST_ASSERT(!characterSet.empty());
+ ARMNN_ASSERT(!characterSet.empty());
const std::size_t firstFound = originalString.find_first_not_of(characterSet);
const std::size_t lastFound = originalString.find_last_not_of(characterSet);
// Return empty if the originalString is empty or the originalString contains only to-be-striped characters
diff --git a/src/armnnUtils/ModelAccuracyChecker.hpp b/src/armnnUtils/ModelAccuracyChecker.hpp
index c4dd4f1b05..6595a52a98 100644
--- a/src/armnnUtils/ModelAccuracyChecker.hpp
+++ b/src/armnnUtils/ModelAccuracyChecker.hpp
@@ -7,7 +7,7 @@
#include <algorithm>
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <cstddef>
#include <functional>
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 535d68adbe..952c76885a 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -6,8 +6,8 @@
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -114,8 +114,8 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
const unsigned int firstAxisInclusive,
const unsigned int lastAxisExclusive)
{
- BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive);
- BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
+ ARMNN_ASSERT(firstAxisInclusive <= lastAxisExclusive);
+ ARMNN_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
unsigned int count = 1;
for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
{
@@ -126,9 +126,9 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
{
- BOOST_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
+ ARMNN_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
+ ARMNN_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
@@ -140,7 +140,7 @@ unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
{
unsigned int numDim = shape.GetNumDimensions();
- BOOST_ASSERT(axis <= numDim - 1);
+ ARMNN_ASSERT(axis <= numDim - 1);
unsigned int count = 1;
for (unsigned int i = axis; i < numDim; i++)
{
diff --git a/src/armnnUtils/test/ParserHelperTest.cpp b/src/armnnUtils/test/ParserHelperTest.cpp
index dc37450ac1..dbf0673bf5 100644
--- a/src/armnnUtils/test/ParserHelperTest.cpp
+++ b/src/armnnUtils/test/ParserHelperTest.cpp
@@ -29,8 +29,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
- BOOST_ASSERT(outputTensorInfo1.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo1.GetShape()[0] == 1);
+ BOOST_TEST(outputTensorInfo1.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo1.GetShape()[0] == 1);
// Reducing dimension 0 results in a 3x4 size tensor (one dimension)
std::set<unsigned int> axisData2 = { 0 };
@@ -38,8 +38,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
- BOOST_ASSERT(outputTensorInfo2.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo2.GetShape()[0] == 12);
+ BOOST_TEST(outputTensorInfo2.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo2.GetShape()[0] == 12);
// Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
std::set<unsigned int> axisData3 = { 0, 1 };
@@ -47,8 +47,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
- BOOST_ASSERT(outputTensorInfo3.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo3.GetShape()[0] == 4);
+ BOOST_TEST(outputTensorInfo3.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo3.GetShape()[0] == 4);
// Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
keepDims = true;
@@ -58,10 +58,10 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
- BOOST_ASSERT(outputTensorInfo4.GetNumDimensions() == 3);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[0] == 1);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[1] == 3);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[2] == 4);
+ BOOST_TEST(outputTensorInfo4.GetNumDimensions() == 3);
+ BOOST_TEST(outputTensorInfo4.GetShape()[0] == 1);
+ BOOST_TEST(outputTensorInfo4.GetShape()[1] == 3);
+ BOOST_TEST(outputTensorInfo4.GetShape()[2] == 4);
// Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
keepDims = true;
@@ -71,10 +71,10 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5, keepDims, outputTensorInfo5);
- BOOST_ASSERT(outputTensorInfo5.GetNumDimensions() == 3);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[0] == 2);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[1] == 1);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[2] == 1);
+ BOOST_TEST(outputTensorInfo5.GetNumDimensions() == 3);
+ BOOST_TEST(outputTensorInfo5.GetShape()[0] == 2);
+ BOOST_TEST(outputTensorInfo5.GetShape()[1] == 1);
+ BOOST_TEST(outputTensorInfo5.GetShape()[2] == 1);
}
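
Note that the test hunks above change BOOST_ASSERT to BOOST_TEST rather than to ARMNN_ASSERT: BOOST_ASSERT is the plain runtime assert (it aborts on failure and is compiled out under NDEBUG), whereas BOOST_TEST registers the check with the Boost.Test framework, so a failure is reported and the remaining checks still run. A minimal self-contained case contrasting the two, illustrative only:

    // Minimal Boost.Test case contrasting the two macros.
    #define BOOST_TEST_MODULE AssertVsTest
    #include <boost/test/included/unit_test.hpp>

    BOOST_AUTO_TEST_CASE(AssertVsTestExample)
    {
        int x = 2;
        BOOST_TEST(x == 2); // recorded by the framework; a failure is
                            // reported and the next check still runs
        // BOOST_ASSERT(x == 2); // would abort the process on failure and
        //                       // vanishes entirely when NDEBUG is defined
    }
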
diff --git a/src/armnnUtils/test/PrototxtConversionsTest.cpp b/src/armnnUtils/test/PrototxtConversionsTest.cpp
index f263a52340..d51c8015cf 100644
--- a/src/armnnUtils/test/PrototxtConversionsTest.cpp
+++ b/src/armnnUtils/test/PrototxtConversionsTest.cpp
@@ -15,28 +15,28 @@ BOOST_AUTO_TEST_CASE(ConvertInt32ToOctalStringTest)
using armnnUtils::ConvertInt32ToOctalString;
std::string octalString = ConvertInt32ToOctalString(1);
- BOOST_ASSERT(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
octalString = ConvertInt32ToOctalString(256);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
octalString = ConvertInt32ToOctalString(65536);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
octalString = ConvertInt32ToOctalString(16777216);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
octalString = ConvertInt32ToOctalString(-1);
- BOOST_ASSERT(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-256);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-65536);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-16777216);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
}
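
Worth keeping in mind when reading these checks: std::string::compare returns 0 when the strings are equal and non-zero otherwise, so BOOST_TEST(octalString.compare(...)) passes precisely when the strings differ. An equality check would read BOOST_TEST(octalString.compare(...) == 0) or BOOST_TEST(octalString == ...); the pattern predates this patch and is preserved as-is. A tiny demonstration of the semantics:

    // std::string::compare is 0 on equality, non-zero otherwise, so
    // asserting its truthiness asserts that the strings are NOT equal.
    #include <cassert>
    #include <string>

    int main()
    {
        std::string s = "dim";
        assert(s.compare("dim") == 0); // equal strings compare to zero
        assert(s.compare("DIM") != 0); // differing strings compare non-zero
    }
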
BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
@@ -51,13 +51,13 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
};
auto output_string = createAndConvert({5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 5\n"
"}"));
output_string = createAndConvert({4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 4\n"
"}\n"
@@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
));
output_string = createAndConvert({3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 3\n"
"}\n"
@@ -80,7 +80,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
));
output_string = createAndConvert({2, 3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 2\n"
"}\n"
@@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
));
output_string = createAndConvert({1, 2, 3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 1\n"
"}\n"
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
));
output_string = createAndConvert({0xffffffff, 0xffffffff});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 4294967295\n"
"}\n"
@@ -125,7 +125,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
));
output_string = createAndConvert({1, 0});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 1\n"
"}\n"
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index f5a9e05de9..7a75f9c872 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -42,7 +42,7 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
case armnn::DataType::Signed32:
return arm_compute::DataType::S32;
default:
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
return arm_compute::DataType::UNKNOWN;
}
}
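
The default branch above asserts and still returns a defined sentinel (arm_compute::DataType::UNKNOWN), so release builds, where the assert compiles away, cannot fall off the end of the function. The same shape in isolation, with an illustrative enum rather than the ArmNN types:

    #include <cassert>

    enum class Fruit { Apple, Pear };

    const char* Name(Fruit f)
    {
        switch (f)
        {
            case Fruit::Apple: return "apple";
            case Fruit::Pear:  return "pear";
            default:
                assert(false && "Unknown fruit");
                return "unknown"; // reached only when NDEBUG removes the assert
        }
    }
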
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9c6f46462e..80bb7623e8 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -6,11 +6,10 @@
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/Types.h>
-#include <boost/assert.hpp>
-
namespace armnn
{
@@ -161,7 +160,7 @@ inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc,
unsigned int dim = tensor.GetNumDimensions();
- BOOST_ASSERT(dim != 0);
+ ARMNN_ASSERT(dim != 0);
// Currently ArmNN supports axis 1.
return dim - 1;
diff --git a/src/backends/aclCommon/BaseMemoryManager.cpp b/src/backends/aclCommon/BaseMemoryManager.cpp
index 844fbcd4ca..b43eaf8da3 100644
--- a/src/backends/aclCommon/BaseMemoryManager.cpp
+++ b/src/backends/aclCommon/BaseMemoryManager.cpp
@@ -19,7 +19,7 @@ namespace armnn
BaseMemoryManager::BaseMemoryManager(std::unique_ptr<arm_compute::IAllocator> alloc,
MemoryAffinity memoryAffinity)
{
- BOOST_ASSERT(alloc);
+ ARMNN_ASSERT(alloc);
m_Allocator = std::move(alloc);
m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
@@ -51,30 +51,30 @@ void BaseMemoryManager::Acquire()
static const size_t s_NumPools = 1;
// Allocate memory pools for intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Allocate memory pools for inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->acquire();
}
void BaseMemoryManager::Release()
{
// Release inter-layer memory group. NOTE: This has to come before releasing the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->release();
// Release memory pools managed by intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->clear();
// Release memory pools managed by inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->clear();
}
#else
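
Acquire() and Release() above are order-sensitive, as the NOTE comments spell out: pools are populated before the inter-layer memory group is acquired, and the group is released before the pools are cleared. A scope guard is one way call sites can uphold the pairing automatically; this helper is hypothetical, not part of the ArmNN API, and assumes the BaseMemoryManager interface shown above.

    // Hypothetical scope guard pairing Acquire()/Release(); not ArmNN API.
    class ScopedMemoryAcquisition
    {
    public:
        explicit ScopedMemoryAcquisition(armnn::BaseMemoryManager& mgr)
            : m_Mgr(mgr)
        {
            m_Mgr.Acquire();
        }
        ~ScopedMemoryAcquisition() { m_Mgr.Release(); }

        ScopedMemoryAcquisition(const ScopedMemoryAcquisition&) = delete;
        ScopedMemoryAcquisition& operator=(const ScopedMemoryAcquisition&) = delete;

    private:
        armnn::BaseMemoryManager& m_Mgr;
    };
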
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index 65e6c47179..7bcf59fdf1 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -118,8 +118,8 @@ void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
{
- BOOST_ASSERT(GetTensor<void>() == nullptr);
- BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
+ ARMNN_ASSERT(GetTensor<void>() == nullptr);
+ ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
if (srcMemory)
{
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index e6e59fcd4f..78efb08f99 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -14,7 +14,7 @@
#include <algorithm>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -30,7 +30,7 @@ public:
template <typename T>
const T* GetConstTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<const T*>(m_Memory);
}
@@ -59,8 +59,8 @@ protected:
private:
// Only used for testing
- void CopyOutTo(void *) const override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
- void CopyInFrom(const void*) override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
@@ -79,7 +79,7 @@ public:
template <typename T>
T* GetTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<T*>(m_MutableMemory);
}
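
GetTensor<T> and GetConstTensor<T> above guard a reinterpret_cast with a CompatibleTypes<T> assertion, so a mismatch between the template argument and the handle's DataType is caught in debug builds before any typed access happens. A hypothetical call site, assuming a handle whose TensorInfo is Float32:

    // Hypothetical usage of the typed accessor; assumes the handle holds
    // Float32 data, which the ARMNN_ASSERT inside GetTensor<float> verifies.
    void ScaleInPlace(armnn::ScopedCpuTensorHandle& handle, float factor)
    {
        float* data = handle.GetTensor<float>();
        const unsigned int count = handle.GetTensorInfo().GetNumElements();
        for (unsigned int i = 0; i < count; ++i)
        {
            data[i] *= factor;
        }
    }
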
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index 03bec53353..ddecc82172 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
namespace armnn
@@ -30,7 +30,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QAsymmS8:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 8abc8a6ef5..560182286e 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -70,7 +70,7 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
case DataType::QSymmS16:
return nullptr;
default:
- BOOST_ASSERT_MSG(false, "Unknown DataType.");
+ ARMNN_ASSERT_MSG(false, "Unknown DataType.");
return nullptr;
}
}
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index 984443b79b..244b5f1249 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -65,9 +65,9 @@ public:
if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedInputType;
@@ -84,14 +84,14 @@ public:
{
if (expectedOutputType != expectedInputType)
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
}
else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedOutputType;
@@ -109,14 +109,14 @@ public:
MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<QueueDescriptor>(descriptor, info)
{
- BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == InputDataType;
}),
"Trying to create workload with incorrect type");
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == OutputDataType;
@@ -136,11 +136,11 @@ public:
{
if (!info.m_InputTensorInfos.empty())
{
- BOOST_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
+ ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
"Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == DataType;
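
The checks above use std::all_of with a lambda to verify that every input and output TensorInfo carries the expected DataType, asserting once with a single message instead of looping manually. The idiom in isolation, with illustrative types standing in for the ArmNN ones:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    enum class DataType { Float32, QAsymmU8 };

    bool AllOfType(const std::vector<DataType>& types, DataType expected)
    {
        return std::all_of(types.begin(), types.end(),
                           [expected](DataType t) { return t == expected; });
    }

    int main()
    {
        std::vector<DataType> inputs{ DataType::Float32, DataType::Float32 };
        assert(AllOfType(inputs, DataType::Float32));
    }
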
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index f968ad78f7..1f4a849ee9 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -40,7 +40,7 @@ DataType GetBiasDataType(DataType inputDataType)
case DataType::QSymmS16:
return DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "Invalid input data type");
+ ARMNN_ASSERT_MSG(false, "Invalid input data type");
return DataType::Float32;
}
}
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5628c36884..a7e8576668 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -194,7 +194,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
@@ -244,7 +244,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
@@ -335,7 +335,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
TensorInfo biasInfo;
const TensorInfo * biasInfoPtr = nullptr;
@@ -347,7 +347,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
biasInfoPtr = &biasInfo;
}
@@ -381,7 +381,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
default:
{
- BOOST_ASSERT_MSG(false, "Unexpected bias type");
+ ARMNN_ASSERT_MSG(false, "Unexpected bias type");
}
}
}
@@ -1156,12 +1156,12 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
GetBiasTypeFromWeightsType(dataType));
}
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
result = layerSupportObject->IsTransposeConvolution2dSupported(input,
@@ -1175,7 +1175,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
default:
{
- BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
+ ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
reason.value() = "Unrecognised layer type";
result = false;
break;
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 3b3959ba9f..bd5e81e678 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -13,8 +13,8 @@ namespace armnn
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
const PermutationVector& permutationVector, void* permuteBuffer)
{
- BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
TensorInfo tensorInfo = tensor->GetTensorInfo();
@@ -133,8 +133,8 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
DataLayout dataLayout,
void* permuteBuffer)
{
- BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
auto multiplier = weightTensor->GetTensorInfo().GetShape()[0];
auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 66056db4ca..a4da924725 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -168,8 +168,8 @@ void CopyTensorContentsGeneric(const ITensorHandle* srcTensor, ITensorHandle* ds
auto dstPtrChannel = dstData;
for (unsigned int w = 0; w < copyWidth; ++w)
{
- BOOST_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
- BOOST_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
+ ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
+ ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
copy(dstData, srcData, copyLength);
dstData += dstWidthStride;
srcData += srcWidthStride;
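
The two asserts above pin down the loop invariant of CopyTensorContentsGeneric: every strided window read or written must lie wholly inside its buffer. The same invariant in a minimal strided row copy, illustrative only:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    void CopyRows(const char* srcStart, std::size_t srcSize,
                  char* dstStart, std::size_t dstSize,
                  std::size_t rows, std::size_t copyLength,
                  std::size_t srcStride, std::size_t dstStride)
    {
        const char* src = srcStart;
        char* dst = dstStart;
        for (std::size_t r = 0; r < rows; ++r)
        {
            // Each window [ptr, ptr + copyLength) must stay inside its buffer.
            assert(src >= srcStart && src + copyLength <= srcStart + srcSize);
            assert(dst >= dstStart && dst + copyLength <= dstStart + dstSize);
            std::memcpy(dst, src, copyLength);
            src += srcStride;
            dst += dstStride;
        }
    }
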
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index 116bf77c63..abdaa8131b 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -23,7 +23,7 @@ namespace
bool IsLayerSupported(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::LayerType layerType = layer->GetType();
switch (layerType)
@@ -47,7 +47,7 @@ bool IsLayerSupported(const armnn::Layer& layer)
bool IsLayerOptimizable(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// A Layer is not optimizable if its name contains "unoptimizable"
const std::string layerName(layer->GetName());
@@ -191,7 +191,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
supportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
{
- BOOST_ASSERT(supportedSubgraph != nullptr);
+ ARMNN_ASSERT(supportedSubgraph != nullptr);
PreCompiledLayer* preCompiledLayer =
optimizationViews.GetGraph().AddLayer<PreCompiledLayer>(
@@ -228,7 +228,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
unsupportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
{
- BOOST_ASSERT(unsupportedSubgraph != nullptr);
+ ARMNN_ASSERT(unsupportedSubgraph != nullptr);
optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
});
@@ -256,7 +256,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
untouchedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
{
- BOOST_ASSERT(untouchedSubgraph != nullptr);
+ ARMNN_ASSERT(untouchedSubgraph != nullptr);
optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
});
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index df001b7530..9f38e47715 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -106,7 +106,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 319434e093..a82048cd81 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1212,9 +1212,9 @@ LayerTestResult<T,4> CompareActivationTestImpl(
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
- BOOST_ASSERT(workload != nullptr);
+ ARMNN_ASSERT(workload != nullptr);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
- BOOST_ASSERT(workloadRef != nullptr);
+ ARMNN_ASSERT(workloadRef != nullptr);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 2156b0ee9e..a6b703b08b 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -5,7 +5,7 @@
#include "ComparisonTestImpl.hpp"
-
+#include <armnn/utility/Assert.hpp>
#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
@@ -18,8 +18,6 @@
#include <test/TensorHelpers.hpp>
-#include <boost/assert.hpp>
-
namespace
{
@@ -44,13 +42,13 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
int outQuantOffset)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
- BOOST_ASSERT(shape1.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
- BOOST_ASSERT(outShape.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 1e40b42dcf..9e08e30dec 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -61,7 +61,7 @@ bool NeedPermuteForConcat(
}
else
{
- BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
"Input shapes must have the same number of dimensions");
}
}
@@ -92,7 +92,7 @@ void Generate3dPermuteVectorForConcat(
unsigned int & concatDim,
std::pair<PermutationVector, PermutationVector> & permutations)
{
- BOOST_ASSERT_MSG(numDimensions <= 3,
+ ARMNN_ASSERT_MSG(numDimensions <= 3,
"Only dimensions 1,2 and 3 are supported by this helper");
unsigned int expandedBy = 3 - numDimensions;
unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@ void Generate3dPermuteVectorForConcat(
}
else
{
- BOOST_ASSERT(expandedConcatAxis == 0);
+ ARMNN_ASSERT(expandedConcatAxis == 0);
concatDim = 0;
}
}
@@ -127,7 +127,7 @@ template<typename T> void PermuteTensorData(
std::vector<T>& outputData)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+ ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
// Nullptr is an error in the test. By returning without doing the concatenation
@@ -179,7 +179,7 @@ template<typename T> void PermuteInputsForConcat(
TensorInfo & outputTensorInfo)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+ ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
unsigned int numDims = 0;
@@ -200,12 +200,12 @@ template<typename T> void PermuteInputsForConcat(
// Store the reverse permutation.
permuteVector = permutations.second;
- BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+ ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
"Test logic error, we don't need permutation, so we shouldn't arrive here");
}
else
{
- BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
"All inputs must have the same number of dimensions");
}
@@ -244,7 +244,7 @@ template <typename T> void PermuteOutputForConcat(
std::unique_ptr<ITensorHandle> && inputDataHandle,
T * data)
{
- BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+ ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
if (data == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
@@ -279,7 +279,7 @@ template<typename T> void Concatenate(
unsigned int concatDim,
bool useSubtensor)
{
- BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+ ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
if (output == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
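
Several helpers above pair an ARMNN_ASSERT_MSG with an explicit nullptr check and early return, and the source comments explain why: the assert fails loudly in debug builds, while the runtime check keeps release builds, where asserts compile out entirely, from dereferencing a null pointer. The shape of the pattern:

    // Belt-and-braces null handling as used in the test helpers above.
    #include <cassert>

    void Process(const float* data)
    {
        assert(data != nullptr && "data must not be null");
        if (data == nullptr)
        {
            return; // keeps release builds safe once the assert compiles out
        }
        // ... use data ...
    }
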
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 50ad667dde..c66027efdf 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -169,9 +169,9 @@ template<typename T, typename B>
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
"Invalid type and parameter combination.");
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
"Invalid type and parameter combination.");
// Note we need to dequantize and re-quantize the image value and the bias.
@@ -183,7 +183,7 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
for (uint32_t x = 0; x < w; ++x)
{
uint32_t offset = (i * h + y) * w + x;
- BOOST_ASSERT(offset < v.size());
+ ARMNN_ASSERT(offset < v.size());
T& outRef = v[offset];
float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -236,11 +236,11 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
@@ -1627,7 +1627,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
@@ -2135,11 +2135,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
index c277d2d5e1..c64fc88024 100644
--- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
@@ -6,6 +6,7 @@
#pragma once
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/multi_array.hpp>
@@ -14,7 +15,7 @@
template <std::size_t n>
boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
{
- BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+ ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
"Attempting to construct a shape array of mismatching size");
boost::array<unsigned int, n> shape;
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 772ae2ccc7..953b543acb 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -104,7 +104,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), input.origin());
- BOOST_ASSERT(workload);
+ ARMNN_ASSERT(workload);
ExecuteWorkload(*workload, memoryManager);
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index 068e2958af..f612c3743d 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -7,6 +7,7 @@
#include "ClContextControl.hpp"
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/CL/OpenCL.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
@@ -184,7 +185,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
return TuningLevel::Exhaustive;
default:
{
- BOOST_ASSERT_MSG(false, "Tuning level not recognised.");
+ ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
return TuningLevel::None;
}
}
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index f307133085..dbcccce945 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -9,12 +9,12 @@
#include <LeakChecking.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
@@ -59,11 +59,11 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
// Removes the use of global CL context.
cl::Context::setDefault(cl::Context{});
- BOOST_ASSERT(cl::Context::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
// Removes the use of global CL command queue.
cl::CommandQueue::setDefault(cl::CommandQueue{});
- BOOST_ASSERT(cl::CommandQueue::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
// Always load the OpenCL runtime.
LoadOpenClRuntime();
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index 39ae14eaf3..e928870324 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -33,7 +33,7 @@ void ClConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
@@ -56,7 +56,7 @@ void ClConstantWorkload::Execute() const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index e8af0ee3b7..73ec95ce9f 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index 858eab4e00..8704b1276f 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -45,7 +45,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -125,7 +125,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -148,7 +148,7 @@ void ClDepthwiseConvolutionWorkload::FreeUnusedTensors()
void ClDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index 7c0736645b..20b2104c62 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index b4bcc1c017..54e7717b7d 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -90,7 +90,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
switch(handle->GetTensorInfo().GetDataType())
@@ -116,7 +116,7 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
diff --git a/src/backends/neon/NeonInterceptorScheduler.cpp b/src/backends/neon/NeonInterceptorScheduler.cpp
index d8dd01bd6c..745c5fde62 100644
--- a/src/backends/neon/NeonInterceptorScheduler.cpp
+++ b/src/backends/neon/NeonInterceptorScheduler.cpp
@@ -5,8 +5,6 @@
#include "NeonInterceptorScheduler.hpp"
-#include <boost/assert.hpp>
-
namespace armnn{
NeonInterceptorScheduler::NeonInterceptorScheduler(arm_compute::IScheduler &realScheduler)
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index 11d20878d7..fb2c2b5128 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -7,6 +7,8 @@
#include <BFloat16.hpp>
#include <Half.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -61,7 +63,7 @@ public:
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- BOOST_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_ASSERT(m_MemoryGroup != nullptr);
m_MemoryGroup->manage(&m_Tensor);
}
}
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index 219edc9680..1079a0d57c 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -6,9 +6,10 @@
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <memory>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
namespace armnn
@@ -21,7 +22,7 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul
void NeonTimer::Start()
{
m_Kernels.clear();
- BOOST_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 83a2692b6e..b9cb807779 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -39,7 +39,7 @@ void NeonConstantWorkload::Execute() const
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::ITensor& output =
boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
@@ -69,7 +69,7 @@ void NeonConstantWorkload::Execute() const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 683decd45c..5d45642eef 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -37,7 +37,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -97,7 +97,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
m_ConvolutionLayer.reset(convolutionLayer.release());
- BOOST_ASSERT(m_ConvolutionLayer);
+ ARMNN_ASSERT(m_ConvolutionLayer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index e39fe54199..a9a3c75bfd 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -49,7 +49,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -127,7 +127,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -144,7 +144,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index c62f71948c..ffca2076fe 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -38,7 +38,7 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo&
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
@@ -81,7 +81,7 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- BOOST_ASSERT(m_Layer);
+ ARMNN_ASSERT(m_Layer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 3f0fe842aa..c3c9d3dbbc 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -35,7 +35,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
switch(handle->GetTensorInfo().GetDataType())
{
@@ -59,7 +59,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 607c86b112..25d639a38a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -348,7 +348,7 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
"Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference concatenation: input type not supported");
@@ -1864,7 +1864,7 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
"Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference stack: input type not supported");
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 4f15e39ee1..76054e41e1 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -4,7 +4,7 @@
//
#include "RefMemoryManager.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
{
- BOOST_ASSERT(pool);
+ ARMNN_ASSERT(pool);
m_FreePools.push_back(pool);
}
@@ -75,25 +75,25 @@ RefMemoryManager::Pool::~Pool()
void* RefMemoryManager::Pool::GetPointer()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void RefMemoryManager::Pool::Acquire()
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void RefMemoryManager::Pool::Release()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
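
Read together, the assert messages above define the Pool state machine: Reserve() may only grow the requested size while the pool is unacquired, Acquire() allocates exactly once, and GetPointer()/Release() are valid only while memory is held. Assembled as a minimal standalone class, mirroring the hunks rather than the full ArmNN implementation:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    class Pool
    {
    public:
        void Reserve(unsigned int numBytes)
        {
            assert(!m_Pointer && "Reserve() cannot be called after memory acquired");
            m_Size = std::max(m_Size, numBytes);
        }
        void Acquire()
        {
            assert(!m_Pointer && "Acquire() called when memory already acquired");
            m_Pointer = ::operator new(static_cast<std::size_t>(m_Size));
        }
        void Release()
        {
            assert(m_Pointer && "Release() called when memory not acquired");
            ::operator delete(m_Pointer);
            m_Pointer = nullptr;
        }
        void* GetPointer()
        {
            assert(m_Pointer && "GetPointer() called when memory not acquired");
            return m_Pointer;
        }
    private:
        void* m_Pointer = nullptr;
        unsigned int m_Size = 0;
    };
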
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 84a74edc1d..7d86b110a7 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()
void RefTensorHandle::Manage()
{
- BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
@@ -84,7 +84,7 @@ void* RefTensorHandle::GetPointer() const
}
else
{
- BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+ ARMNN_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
return m_MemoryManager->GetPointer(m_Pool);
}
}
@@ -92,14 +92,14 @@ void* RefTensorHandle::GetPointer() const
void RefTensorHandle::CopyOutTo(void* dest) const
{
const void *src = GetPointer();
- BOOST_ASSERT(src);
+ ARMNN_ASSERT(src);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
void RefTensorHandle::CopyInFrom(const void* src)
{
void *dest = GetPointer();
- BOOST_ASSERT(dest);
+ ARMNN_ASSERT(dest);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index f43e8b67a9..be20644ab7 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
@@ -78,28 +77,28 @@ public:
TypedIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
return *this;
}
TypedIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
return *this;
}
TypedIterator& operator-=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= increment;
return *this;
}
TypedIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -107,7 +106,7 @@ public:
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
IgnoreUnused(axisIndex);
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
@@ -504,7 +503,7 @@ public:
// This should be called to set index for per-axis Encoder/Decoder
PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = axisIndex;
return *this;
@@ -519,7 +518,7 @@ public:
PerAxisIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -527,7 +526,7 @@ public:
PerAxisIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -535,7 +534,7 @@ public:
PerAxisIterator& operator-=(const unsigned int decrement) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= decrement;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
@@ -543,7 +542,7 @@ public:
PerAxisIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 7efdb9b75c..bf7de1b04c 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -42,11 +42,11 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
{
TensorShape inputShape = inputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
TensorShape outputShape = outputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+ ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
const unsigned int inputBatchSize = inputShape[0];
const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
@@ -55,12 +55,12 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
- BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
const unsigned int blockShapeHeight = blockShape[0];
const unsigned int blockShapeWidth = blockShape[1];
- BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
const unsigned int cropsTop = cropsData[0].first;
const unsigned int cropsLeft = cropsData[1].first;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index bb55424c0c..a85e34ee61 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -38,7 +38,7 @@ void Concatenate(const ConcatQueueDescriptor &data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 0c13e3ba0d..9d2f410a25 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -5,7 +5,7 @@
#include "ConvImpl.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cmath>
#include <limits>
@@ -15,7 +15,7 @@ namespace armnn
QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
{
- BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+ ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
if (multiplier == 0.0f)
{
m_Multiplier = 0;
@@ -26,14 +26,14 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
const double q = std::frexp(multiplier, &m_RightShift);
m_RightShift = -m_RightShift;
int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
- BOOST_ASSERT(qFixed <= (1ll << 31));
+ ARMNN_ASSERT(qFixed <= (1ll << 31));
if (qFixed == (1ll << 31))
{
qFixed /= 2;
--m_RightShift;
}
- BOOST_ASSERT(m_RightShift >= 0);
- BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+ ARMNN_ASSERT(m_RightShift >= 0);
+ ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
m_Multiplier = static_cast<int32_t>(qFixed);
}
}
@@ -61,7 +61,7 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int
int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
{
- BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+ ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
int32_t mask = (1 << exponent) - 1;
int32_t remainder = x & mask;
int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
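
QuantizedMultiplierSmallerThanOne above decomposes a real multiplier M in [0, 1) into a Q0.31 fixed-point value plus a right shift: std::frexp writes M = q * 2^e with q in [0.5, 1), so the fixed-point multiplier is round(q * 2^31) and the shift is -e; RoundingDivideByPOT then applies that shift with round-to-nearest. A worked check for M = 0.25, where frexp yields q = 0.5 and e = -1:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        int exponent = 0;
        const double q = std::frexp(0.25, &exponent); // 0.25 = 0.5 * 2^-1
        const int rightShift = -exponent;             // 1
        const auto qFixed =
            static_cast<std::int64_t>(std::round(q * (1ll << 31)));
        assert(rightShift == 1);
        assert(qFixed == (1ll << 30)); // round(0.5 * 2^31) = 2^30
    }
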
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 562fd3e296..f5aa8f3447 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -15,7 +15,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 3434ccb764..deb3b1f4b2 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,7 +10,7 @@
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -142,7 +142,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index 91ca160ae2..f5e9ec5498 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -8,7 +8,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
@@ -22,7 +22,7 @@ void DepthToSpace(const TensorInfo& inputInfo,
unsigned int dataTypeSize)
{
const unsigned int blockSize = descriptor.m_BlockSize;
- BOOST_ASSERT(blockSize != 0u);
+ ARMNN_ASSERT(blockSize != 0u);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int batches = inputShape[0];
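
For context on the blockSize assertion, a hedged single-batch NHWC sketch of the depth-to-space rearrangement (DCR channel ordering assumed, not taken from the file):

// Sketch: move channel blocks of size blockSize*blockSize into spatial
// positions; output is (h*blockSize) x (w*blockSize) x (c / blockSize^2).
#include <cassert>
#include <vector>

std::vector<float> DepthToSpaceNhwc(const std::vector<float>& in,
                                    unsigned int h, unsigned int w,
                                    unsigned int c, unsigned int blockSize)
{
    assert(blockSize != 0u && c % (blockSize * blockSize) == 0u);
    const unsigned int outC = c / (blockSize * blockSize);
    std::vector<float> out(in.size());
    for (unsigned int y = 0; y < h; ++y)
        for (unsigned int x = 0; x < w; ++x)
            for (unsigned int ch = 0; ch < c; ++ch)
            {
                const unsigned int by = (ch / outC) / blockSize; // block row
                const unsigned int bx = (ch / outC) % blockSize; // block col
                const unsigned int oc = ch % outC;
                const unsigned int oy = y * blockSize + by;
                const unsigned int ox = x * blockSize + bx;
                out[(oy * w * blockSize + ox) * outC + oc] =
                    in[(y * w + x) * c + ch];
            }
    return out;
}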
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 63c0405efe..fdc8e30c75 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -16,7 +16,7 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& outputInfo)
{
IgnoreUnused(outputInfo);
- BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
// inputDecoder.Get() dequantizes the data element from whatever
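
The element-count assertion above fronts a simple per-element loop; a minimal sketch of affine dequantization, assuming the usual scale/zero-point scheme:

// Sketch: real = scale * (quantized - zeroPoint), applied element-wise.
#include <cstdint>
#include <vector>

std::vector<float> DequantizeU8(const std::vector<uint8_t>& in,
                                float scale, int32_t zeroPoint)
{
    std::vector<float> out;
    out.reserve(in.size());
    for (uint8_t q : in)
    {
        out.push_back(scale * static_cast<float>(static_cast<int32_t>(q) - zeroPoint));
    }
    return out;
}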
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 57cf01e4a1..61a504ec6b 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -5,8 +5,8 @@
#include "DetectionPostProcess.hpp"
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
@@ -213,8 +213,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
// xmax
boxCorners[indexW] = xCentre + halfW;
- BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
- BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+ ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+ ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
}
unsigned int numClassesWithBg = desc.m_NumClasses + 1;
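
The two ordering assertions follow centre-size box decoding; a short sketch of that decoding, using only quantities visible in the hunk (the struct and function names are illustrative):

// Sketch: decode a centre-size box (yc, xc, h, w) into corners
// (ymin, xmin, ymax, xmax); the asserts in the diff check corner ordering.
#include <cassert>

struct Box { float yMin, xMin, yMax, xMax; };

Box DecodeCentreSize(float yCentre, float xCentre, float height, float width)
{
    const float halfH = 0.5f * height;
    const float halfW = 0.5f * width;
    Box box{ yCentre - halfH, xCentre - halfW, yCentre + halfH, xCentre + halfW };
    assert(box.yMin < box.yMax);
    assert(box.xMin < box.xMax);
    return box;
}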
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index e93987da31..c0524a7719 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -89,7 +89,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
break;
}
}
@@ -107,7 +107,7 @@ inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void*
}
default:
{
- BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
break;
}
}
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 02d9b060ef..5a87520f84 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -7,8 +7,6 @@
#include "RefWorkloadUtils.hpp"
-#include <boost/assert.hpp>
-
namespace armnn
{
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 4cf3a142a0..c23edcd3bd 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -36,7 +36,7 @@ void Gather(const TensorInfo& paramsInfo,
{
unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
- BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+ ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
unsigned int startOffset = indx * paramsProduct;
unsigned int endOffset = startOffset + paramsProduct;
@@ -51,7 +51,7 @@ void Gather(const TensorInfo& paramsInfo,
}
}
- BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+ ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
}
} //namespace armnn
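
A compact sketch of the axis-0 gather that these bound checks protect (illustrative signature; sliceSize plays the role of paramsProduct in the hunk):

// Sketch: each index selects one contiguous run of sliceSize elements
// along axis 0; the asserts bound-check the index and the output size.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<float> GatherAxis0(const std::vector<float>& params,
                               std::size_t dim0, const std::vector<int>& indices)
{
    assert(dim0 != 0 && params.size() % dim0 == 0);
    const std::size_t sliceSize = params.size() / dim0;
    std::vector<float> out;
    out.reserve(indices.size() * sliceSize);
    for (int i : indices)
    {
        assert(i >= 0 && static_cast<std::size_t>(i) < dim0);
        const std::size_t start = static_cast<std::size_t>(i) * sliceSize;
        out.insert(out.end(),
                   params.begin() + static_cast<std::ptrdiff_t>(start),
                   params.begin() + static_cast<std::ptrdiff_t>(start + sliceSize));
    }
    assert(out.size() == indices.size() * sliceSize);
    return out;
}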
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 103d62a8df..1998f50c87 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
@@ -35,7 +35,7 @@ void LogSoftmax(Decoder<float>& input,
const unsigned int numDimensions = inputInfo.GetNumDimensions();
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
- BOOST_ASSERT_MSG(axisIsValid,
+ ARMNN_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
IgnoreUnused(axisIsValid);
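
ValidateAxis itself is not shown in this hunk; a plausible one-liner matching the range stated in the message, [-numDimensions, numDimensions), would be:

// Sketch (assumed body): a signed axis is valid if it lies in [-n, n).
bool ValidateAxis(int axis, unsigned int numDimensions)
{
    const int n = static_cast<int>(numDimensions);
    return axis >= -n && axis < n;
}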
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index f2c0a4fc3f..72080ef042 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -128,7 +128,7 @@ void Mean(const armnn::TensorInfo& inputInfo,
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- BOOST_ASSERT(boost::numeric_cast<float>(current) <
+ ARMNN_ASSERT(boost::numeric_cast<float>(current) <
(std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
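
The assertion above guards an accumulating product against float overflow before each multiply; a stripped-down sketch of the same guard:

// Sketch: accumulate the element count across reduced axes, asserting the
// running product stays below float's maximum before multiplying it in.
#include <cassert>
#include <limits>
#include <vector>

unsigned int CountElementsInAxes(const std::vector<unsigned int>& dims)
{
    unsigned int numElements = 1u;
    for (unsigned int current : dims)
    {
        assert(static_cast<float>(current) <
               std::numeric_limits<float>::max() / static_cast<float>(numElements));
        numElements *= current;
    }
    return numElements;
}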
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 3506198410..d3e65e6615 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
@@ -24,10 +24,10 @@ void RefConstantWorkload::PostAllocationConfigure()
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
- BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+ ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
outputInfo.GetNumBytes());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index ac82db90e5..f8c3548905 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -32,7 +32,7 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
void RefFullyConnectedWorkload::PostAllocationConfigure()
{
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
m_InputShape = inputInfo.GetShape();
m_InputDecoder = MakeDecoder<float>(inputInfo);
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a987e79dda..a2ace13144 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,7 +12,7 @@
#include <Profiling.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -27,8 +27,8 @@ void RefLogSoftmaxWorkload::Execute() const
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- BOOST_ASSERT(decoder != nullptr);
- BOOST_ASSERT(encoder != nullptr);
+ ARMNN_ASSERT(decoder != nullptr);
+ ARMNN_ASSERT(encoder != nullptr);
LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
}
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index be36f40633..fc859506a3 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -26,7 +26,7 @@ void RefStackWorkload::Execute() const
if (!m_Data.m_Parameters.m_Axis)
{
float* output = GetOutputTensorData<float>(0, m_Data);
- BOOST_ASSERT(output != nullptr);
+ ARMNN_ASSERT(output != nullptr);
unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index bfd3c284ae..e994a09230 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -27,7 +27,7 @@ void RefStridedSliceWorkload::Execute() const
DataType inputDataType = inputInfo.GetDataType();
DataType outputDataType = outputInfo.GetDataType();
- BOOST_ASSERT(inputDataType == outputDataType);
+ ARMNN_ASSERT(inputDataType == outputDataType);
IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index 0223cdc56a..e972524f11 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,9 +5,9 @@
#include "Slice.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -22,11 +22,11 @@ void Slice(const TensorInfo& inputInfo,
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int numDims = inputShape.GetNumDimensions();
- BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
- BOOST_ASSERT(descriptor.m_Size.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Size.size() == numDims);
constexpr unsigned int maxNumDims = 4;
- BOOST_ASSERT(numDims <= maxNumDims);
+ ARMNN_ASSERT(numDims <= maxNumDims);
std::vector<unsigned int> paddedInput(4);
std::vector<unsigned int> paddedBegin(4);
@@ -65,10 +65,10 @@ void Slice(const TensorInfo& inputInfo,
unsigned int size2 = paddedSize[2];
unsigned int size3 = paddedSize[3];
- BOOST_ASSERT(begin0 + size0 <= dim0);
- BOOST_ASSERT(begin1 + size1 <= dim1);
- BOOST_ASSERT(begin2 + size2 <= dim2);
- BOOST_ASSERT(begin3 + size3 <= dim3);
+ ARMNN_ASSERT(begin0 + size0 <= dim0);
+ ARMNN_ASSERT(begin1 + size1 <= dim1);
+ ARMNN_ASSERT(begin2 + size2 <= dim2);
+ ARMNN_ASSERT(begin3 + size3 <= dim3);
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
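
The four assertions above encode one invariant; a sketch that states it once for all padded dimensions:

// Sketch: every padded begin/size pair must stay inside its dimension.
#include <array>
#include <cassert>

void CheckSliceBounds(const std::array<unsigned int, 4>& dims,
                      const std::array<unsigned int, 4>& begin,
                      const std::array<unsigned int, 4>& size)
{
    for (unsigned int d = 0; d < 4; ++d)
    {
        assert(begin[d] + size[d] <= dims[d]);
    }
}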
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 5036389a10..32eca84849 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -16,9 +16,9 @@ namespace armnn
/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
{
- BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
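
The hunk ends mid-statement; assuming the truncated line wraps a negative axis into range once the two assertions above have passed, the computation would look like:

// Sketch (assumed continuation): wrap a possibly-negative axis to an
// unsigned index, e.g. axis = -1 with 4 dimensions becomes 3.
unsigned int WrapAxis(int axis, unsigned int numDimensions)
{
    return axis < 0
        ? static_cast<unsigned int>(static_cast<int>(numDimensions) + axis)
        : static_cast<unsigned int>(axis);
}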
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 3bddfb0cab..09edc5e0f5 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -6,8 +6,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include "Splitter.hpp"
#include <cmath>
@@ -47,7 +46,7 @@ void Split(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 271c6fdeb8..26309b080f 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -8,7 +8,7 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -38,7 +38,7 @@ void Splitter(const SplitterQueueDescriptor& data)
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
@@ -67,10 +67,10 @@ void Splitter(const SplitterQueueDescriptor& data)
//We are within the view, to copy input data to the output corresponding to this view.
DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
- BOOST_ASSERT(outputData);
+ ARMNN_ASSERT(outputData);
const DataType* inputData = GetInputTensorData<DataType>(0, data);
- BOOST_ASSERT(inputData);
+ ARMNN_ASSERT(inputData);
outputData[outIndex] = inputData[index];
}
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 62f06dc5ec..b00b049ff6 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -7,7 +7,8 @@
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <cstring>
@@ -20,12 +21,12 @@ namespace
void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
{
- BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+ ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
boost::numeric_cast<unsigned int>(p.m_Begin.size());
- BOOST_ASSERT(dimCount >= beginIndicesCount);
+ ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
p.m_Begin.resize(dimCount);
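
PadParams is only partially visible here; a hedged sketch of the padding it appears to perform, promoting lower-rank descriptors to dimCount entries with identity begin/stride values (field names follow the hunk, the padding strategy is an assumption):

#include <cassert>
#include <vector>

struct SliceParams
{
    std::vector<int> m_Begin;
    std::vector<int> m_Stride;
};

void PadParamsSketch(SliceParams& p, unsigned int dimCount)
{
    assert(dimCount <= 4 && "Expected input with at most 4 dimensions");
    const unsigned int beginCount = static_cast<unsigned int>(p.m_Begin.size());
    assert(dimCount >= beginCount);
    const unsigned int padCount = dimCount - beginCount;

    // New leading dimensions take the identity slice: begin 0, stride 1.
    p.m_Begin.insert(p.m_Begin.begin(), padCount, 0);
    p.m_Stride.insert(p.m_Stride.begin(), padCount, 1);
}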
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e03c42fe60..5d66fd5273 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -9,7 +9,7 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
@@ -25,7 +25,7 @@ public:
, m_Data(data)
, m_DataLayout(dataLayout)
{
- BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+ ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
diff --git a/src/profiling/CommandHandler.cpp b/src/profiling/CommandHandler.cpp
index bb60ac18f0..cae7037327 100644
--- a/src/profiling/CommandHandler.cpp
+++ b/src/profiling/CommandHandler.cpp
@@ -62,7 +62,7 @@ void CommandHandler::HandleCommands(IProfilingConnection& profilingConnection)
m_CommandHandlerRegistry.GetFunctor(packet.GetPacketFamily(),
packet.GetPacketId(),
version.GetEncodedValue());
- BOOST_ASSERT(commandHandlerFunctor);
+ ARMNN_ASSERT(commandHandlerFunctor);
commandHandlerFunctor->operator()(packet);
}
catch (const armnn::TimeoutException&)
diff --git a/src/profiling/CommandHandlerRegistry.cpp b/src/profiling/CommandHandlerRegistry.cpp
index 8070afe623..c2fef7a597 100644
--- a/src/profiling/CommandHandlerRegistry.cpp
+++ b/src/profiling/CommandHandlerRegistry.cpp
@@ -5,7 +5,8 @@
#include "CommandHandlerRegistry.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/format.hpp>
namespace armnn
@@ -19,7 +20,7 @@ void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor,
uint32_t packetId,
uint32_t version)
{
- BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+ ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
CommandHandlerKey key(familyId, packetId, version);
registry[key] = functor;
@@ -27,7 +28,7 @@ void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor,
void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor)
{
- BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+ ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
RegisterFunctor(functor, functor->GetFamilyId(), functor->GetPacketId(), functor->GetVersion());
}
diff --git a/src/profiling/CounterDirectory.cpp b/src/profiling/CounterDirectory.cpp
index c84da10506..415a66072f 100644
--- a/src/profiling/CounterDirectory.cpp
+++ b/src/profiling/CounterDirectory.cpp
@@ -8,6 +8,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
@@ -37,11 +38,11 @@ const Category* CounterDirectory::RegisterCategory(const std::string& categoryNa
// Create the category
CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
// Get the raw category pointer
const Category* categoryPtr = category.get();
- BOOST_ASSERT(categoryPtr);
+ ARMNN_ASSERT(categoryPtr);
// Register the category
m_Categories.insert(std::move(category));
@@ -99,11 +100,11 @@ const Device* CounterDirectory::RegisterDevice(const std::string& deviceName,
// Create the device
DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the raw device pointer
const Device* devicePtr = device.get();
- BOOST_ASSERT(devicePtr);
+ ARMNN_ASSERT(devicePtr);
// Register the device
m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
@@ -162,15 +163,15 @@ const CounterSet* CounterDirectory::RegisterCounterSet(const std::string& counte
// Get the counter set UID
uint16_t counterSetUid = GetNextUid();
- BOOST_ASSERT(counterSetUid == counterSetUidPeek);
+ ARMNN_ASSERT(counterSetUid == counterSetUidPeek);
// Create the counter set
CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
// Get the raw counter set pointer
const CounterSet* counterSetPtr = counterSet.get();
- BOOST_ASSERT(counterSetPtr);
+ ARMNN_ASSERT(counterSetPtr);
// Register the counter set
m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
@@ -251,14 +252,14 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
// Get the parent category
const CategoryPtr& parentCategory = *categoryIt;
- BOOST_ASSERT(parentCategory);
+ ARMNN_ASSERT(parentCategory);
// Check that a counter with the given name is not already registered within the parent category
const std::vector<uint16_t>& parentCategoryCounters = parentCategory->m_Counters;
for (uint16_t parentCategoryCounterUid : parentCategoryCounters)
{
const Counter* parentCategoryCounter = GetCounter(parentCategoryCounterUid);
- BOOST_ASSERT(parentCategoryCounter);
+ ARMNN_ASSERT(parentCategoryCounter);
if (parentCategoryCounter->m_Name == name)
{
@@ -290,7 +291,7 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
// Get the counter UIDs and calculate the max counter UID
std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
- BOOST_ASSERT(!counterUids.empty());
+ ARMNN_ASSERT(!counterUids.empty());
uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
// Get the counter units
@@ -308,11 +309,11 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
unitsValue,
deviceUidValue,
counterSetUidValue);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Get the raw counter pointer
const Counter* counterPtr = counter.get();
- BOOST_ASSERT(counterPtr);
+ ARMNN_ASSERT(counterPtr);
// Process multiple counters if necessary
for (uint16_t counterUid : counterUids)
@@ -336,7 +337,7 @@ const Category* CounterDirectory::GetCategory(const std::string& categoryName) c
}
const Category* category = it->get();
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category;
}
@@ -350,8 +351,8 @@ const Device* CounterDirectory::GetDevice(uint16_t deviceUid) const
}
const Device* device = it->second.get();
- BOOST_ASSERT(device);
- BOOST_ASSERT(device->m_Uid == deviceUid);
+ ARMNN_ASSERT(device);
+ ARMNN_ASSERT(device->m_Uid == deviceUid);
return device;
}
@@ -365,8 +366,8 @@ const CounterSet* CounterDirectory::GetCounterSet(uint16_t counterSetUid) const
}
const CounterSet* counterSet = it->second.get();
- BOOST_ASSERT(counterSet);
- BOOST_ASSERT(counterSet->m_Uid == counterSetUid);
+ ARMNN_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet->m_Uid == counterSetUid);
return counterSet;
}
@@ -380,9 +381,9 @@ const Counter* CounterDirectory::GetCounter(uint16_t counterUid) const
}
const Counter* counter = it->second.get();
- BOOST_ASSERT(counter);
- BOOST_ASSERT(counter->m_Uid <= counterUid);
- BOOST_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
+ ARMNN_ASSERT(counter);
+ ARMNN_ASSERT(counter->m_Uid <= counterUid);
+ ARMNN_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
return counter;
}
@@ -449,7 +450,7 @@ CategoriesIt CounterDirectory::FindCategory(const std::string& categoryName) con
{
return std::find_if(m_Categories.begin(), m_Categories.end(), [&categoryName](const CategoryPtr& category)
{
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category->m_Name == categoryName;
});
@@ -464,8 +465,8 @@ DevicesIt CounterDirectory::FindDevice(const std::string& deviceName) const
{
return std::find_if(m_Devices.begin(), m_Devices.end(), [&deviceName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == deviceName;
});
@@ -480,8 +481,8 @@ CounterSetsIt CounterDirectory::FindCounterSet(const std::string& counterSetName
{
return std::find_if(m_CounterSets.begin(), m_CounterSets.end(), [&counterSetName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == counterSetName;
});
@@ -496,8 +497,8 @@ CountersIt CounterDirectory::FindCounter(const std::string& counterName) const
{
return std::find_if(m_Counters.begin(), m_Counters.end(), [&counterName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == counterName;
});
@@ -536,7 +537,7 @@ uint16_t CounterDirectory::GetNumberOfCores(const Optional<uint16_t>& numberOfCo
// Get the associated device
const DevicePtr& device = deviceIt->second;
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the number of cores of the associated device
return device->m_Cores;
diff --git a/src/profiling/FileOnlyProfilingConnection.cpp b/src/profiling/FileOnlyProfilingConnection.cpp
index 83229caad7..f9bdde961f 100644
--- a/src/profiling/FileOnlyProfilingConnection.cpp
+++ b/src/profiling/FileOnlyProfilingConnection.cpp
@@ -111,7 +111,7 @@ bool FileOnlyProfilingConnection::SendCounterSelectionPacket()
bool FileOnlyProfilingConnection::WritePacket(const unsigned char* buffer, uint32_t length)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
// Read Header and determine case
uint32_t outgoingHeaderAsWords[2];
diff --git a/src/profiling/ProfilingService.cpp b/src/profiling/ProfilingService.cpp
index 3a8f3f83a3..4d7241e7db 100644
--- a/src/profiling/ProfilingService.cpp
+++ b/src/profiling/ProfilingService.cpp
@@ -134,7 +134,7 @@ void ProfilingService::Update()
try
{
// Setup the profiling connection
- BOOST_ASSERT(m_ProfilingConnectionFactory);
+ ARMNN_ASSERT(m_ProfilingConnectionFactory);
m_ProfilingConnection = m_ProfilingConnectionFactory->GetProfilingConnection(m_Options);
}
catch (const Exception& e)
@@ -155,7 +155,7 @@ void ProfilingService::Update()
// "NotConnected" state
break;
case ProfilingState::WaitingForAck:
- BOOST_ASSERT(m_ProfilingConnection);
+ ARMNN_ASSERT(m_ProfilingConnection);
// Start the command thread
m_CommandHandler.Start(*m_ProfilingConnection);
@@ -204,7 +204,7 @@ void ProfilingService::Disconnect()
void ProfilingService::AddBackendProfilingContext(const BackendId backendId,
std::shared_ptr<armnn::profiling::IBackendProfilingContext> profilingContext)
{
- BOOST_ASSERT(profilingContext != nullptr);
+ ARMNN_ASSERT(profilingContext != nullptr);
// Register the backend counters
m_MaxGlobalCounterId = profilingContext->RegisterCounters(m_MaxGlobalCounterId);
m_BackendProfilingContexts.emplace(backendId, std::move(profilingContext));
@@ -238,7 +238,7 @@ uint32_t ProfilingService::GetCounterValue(uint16_t counterUid) const
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->load(std::memory_order::memory_order_relaxed);
}
@@ -268,7 +268,7 @@ void ProfilingService::SetCounterValue(uint16_t counterUid, uint32_t value)
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
counterValuePtr->store(value, std::memory_order::memory_order_relaxed);
}
@@ -276,7 +276,7 @@ uint32_t ProfilingService::AddCounterValue(uint16_t counterUid, uint32_t value)
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->fetch_add(value, std::memory_order::memory_order_relaxed);
}
@@ -284,7 +284,7 @@ uint32_t ProfilingService::SubtractCounterValue(uint16_t counterUid, uint32_t va
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->fetch_sub(value, std::memory_order::memory_order_relaxed);
}
@@ -292,7 +292,7 @@ uint32_t ProfilingService::IncrementCounterValue(uint16_t counterUid)
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->operator++(std::memory_order::memory_order_relaxed);
}
@@ -332,7 +332,7 @@ void ProfilingService::Initialize()
"Network loads",
"The number of networks loaded at runtime",
std::string("networks"));
- BOOST_ASSERT(loadedNetworksCounter);
+ ARMNN_ASSERT(loadedNetworksCounter);
InitializeCounterValue(loadedNetworksCounter->m_Uid);
}
// Register a counter for the number of unloaded networks
@@ -348,7 +348,7 @@ void ProfilingService::Initialize()
"Network unloads",
"The number of networks unloaded at runtime",
std::string("networks"));
- BOOST_ASSERT(unloadedNetworksCounter);
+ ARMNN_ASSERT(unloadedNetworksCounter);
InitializeCounterValue(unloadedNetworksCounter->m_Uid);
}
// Register a counter for the number of registered backends
@@ -364,7 +364,7 @@ void ProfilingService::Initialize()
"Backends registered",
"The number of registered backends",
std::string("backends"));
- BOOST_ASSERT(registeredBackendsCounter);
+ ARMNN_ASSERT(registeredBackendsCounter);
InitializeCounterValue(registeredBackendsCounter->m_Uid);
}
// Register a counter for the number of registered backends
@@ -380,7 +380,7 @@ void ProfilingService::Initialize()
"Backends unregistered",
"The number of unregistered backends",
std::string("backends"));
- BOOST_ASSERT(unregisteredBackendsCounter);
+ ARMNN_ASSERT(unregisteredBackendsCounter);
InitializeCounterValue(unregisteredBackendsCounter->m_Uid);
}
// Register a counter for the number of inferences run
@@ -396,7 +396,7 @@ void ProfilingService::Initialize()
"Inferences run",
"The number of inferences run",
std::string("inferences"));
- BOOST_ASSERT(inferencesRunCounter);
+ ARMNN_ASSERT(inferencesRunCounter);
InitializeCounterValue(inferencesRunCounter->m_Uid);
}
}
diff --git a/src/profiling/ProfilingService.hpp b/src/profiling/ProfilingService.hpp
index df7bd8f857..a6c5e29767 100644
--- a/src/profiling/ProfilingService.hpp
+++ b/src/profiling/ProfilingService.hpp
@@ -264,8 +264,8 @@ protected:
IProfilingConnectionFactory* other,
IProfilingConnectionFactory*& backup)
{
- BOOST_ASSERT(instance.m_ProfilingConnectionFactory);
- BOOST_ASSERT(other);
+ ARMNN_ASSERT(instance.m_ProfilingConnectionFactory);
+ ARMNN_ASSERT(other);
backup = instance.m_ProfilingConnectionFactory.release();
instance.m_ProfilingConnectionFactory.reset(other);
diff --git a/src/profiling/ProfilingUtils.cpp b/src/profiling/ProfilingUtils.cpp
index e419769600..e542b6945b 100644
--- a/src/profiling/ProfilingUtils.cpp
+++ b/src/profiling/ProfilingUtils.cpp
@@ -9,7 +9,7 @@
#include <WallClockTimer.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <fstream>
#include <iostream>
@@ -88,7 +88,7 @@ std::vector<uint16_t> GetNextCounterUids(uint16_t firstUid, uint16_t cores)
void WriteBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset, const void* value, unsigned int valueSize)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteBytes(packetBuffer->GetWritableData(), offset, value, valueSize);
}
@@ -102,36 +102,36 @@ uint32_t ConstructHeader(uint32_t packetFamily,
void WriteUint64(const std::unique_ptr<IPacketBuffer>& packetBuffer, unsigned int offset, uint64_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint64(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint32_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint32(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint16_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint16(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint8_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint8(packetBuffer->GetWritableData(), offset, value);
}
void WriteBytes(unsigned char* buffer, unsigned int offset, const void* value, unsigned int valueSize)
{
- BOOST_ASSERT(buffer);
- BOOST_ASSERT(value);
+ ARMNN_ASSERT(buffer);
+ ARMNN_ASSERT(value);
for (unsigned int i = 0; i < valueSize; i++, offset++)
{
@@ -141,7 +141,7 @@ void WriteBytes(unsigned char* buffer, unsigned int offset, const void* value, u
void WriteUint64(unsigned char* buffer, unsigned int offset, uint64_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -155,7 +155,7 @@ void WriteUint64(unsigned char* buffer, unsigned int offset, uint64_t value)
void WriteUint32(unsigned char* buffer, unsigned int offset, uint32_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -165,7 +165,7 @@ void WriteUint32(unsigned char* buffer, unsigned int offset, uint32_t value)
void WriteUint16(unsigned char* buffer, unsigned int offset, uint16_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -173,50 +173,50 @@ void WriteUint16(unsigned char* buffer, unsigned int offset, uint16_t value)
void WriteUint8(unsigned char* buffer, unsigned int offset, uint8_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value);
}
void ReadBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
ReadBytes(packetBuffer->GetReadableData(), offset, valueSize, outValue);
}
uint64_t ReadUint64(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint64(packetBuffer->GetReadableData(), offset);
}
uint32_t ReadUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint32(packetBuffer->GetReadableData(), offset);
}
uint16_t ReadUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint16(packetBuffer->GetReadableData(), offset);
}
uint8_t ReadUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint8(packetBuffer->GetReadableData(), offset);
}
void ReadBytes(const unsigned char* buffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
{
- BOOST_ASSERT(buffer);
- BOOST_ASSERT(outValue);
+ ARMNN_ASSERT(buffer);
+ ARMNN_ASSERT(outValue);
for (unsigned int i = 0; i < valueSize; i++, offset++)
{
@@ -226,7 +226,7 @@ void ReadBytes(const unsigned char* buffer, unsigned int offset, unsigned int va
uint64_t ReadUint64(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint64_t value = 0;
value = static_cast<uint64_t>(buffer[offset]);
@@ -243,7 +243,7 @@ uint64_t ReadUint64(const unsigned char* buffer, unsigned int offset)
uint32_t ReadUint32(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint32_t value = 0;
value = static_cast<uint32_t>(buffer[offset]);
@@ -255,7 +255,7 @@ uint32_t ReadUint32(const unsigned char* buffer, unsigned int offset)
uint16_t ReadUint16(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint32_t value = 0;
value = static_cast<uint32_t>(buffer[offset]);
@@ -265,7 +265,7 @@ uint16_t ReadUint16(const unsigned char* buffer, unsigned int offset)
uint8_t ReadUint8(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
return buffer[offset];
}
@@ -310,7 +310,7 @@ uint32_t CalculateSizeOfPaddedSwString(const std::string& str)
// Read TimelineMessageDirectoryPacket from given IPacketBuffer and offset
SwTraceMessage ReadSwTraceMessage(const unsigned char* packetBuffer, unsigned int& offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
unsigned int uint32_t_size = sizeof(uint32_t);
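
The WriteUint*/ReadUint* hunks all share one convention; a self-contained sketch of the little-endian byte layout they implement, shown for the 32-bit case:

// Sketch: values are serialised little-endian, one byte at a time, after
// a null check on the destination buffer (the assertion the diff swaps).
#include <cassert>
#include <cstdint>

void WriteUint32LE(unsigned char* buffer, unsigned int offset, uint32_t value)
{
    assert(buffer);
    buffer[offset]     = static_cast<unsigned char>(value & 0xFF);
    buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
    buffer[offset + 2] = static_cast<unsigned char>((value >> 16) & 0xFF);
    buffer[offset + 3] = static_cast<unsigned char>((value >> 24) & 0xFF);
}

uint32_t ReadUint32LE(const unsigned char* buffer, unsigned int offset)
{
    assert(buffer);
    return static_cast<uint32_t>(buffer[offset])
         | (static_cast<uint32_t>(buffer[offset + 1]) << 8)
         | (static_cast<uint32_t>(buffer[offset + 2]) << 16)
         | (static_cast<uint32_t>(buffer[offset + 3]) << 24);
}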
diff --git a/src/profiling/SendCounterPacket.cpp b/src/profiling/SendCounterPacket.cpp
index ae4bab91e7..24b86d4427 100644
--- a/src/profiling/SendCounterPacket.cpp
+++ b/src/profiling/SendCounterPacket.cpp
@@ -9,6 +9,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
#include <Processes.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
@@ -178,10 +179,10 @@ bool SendCounterPacket::CreateCategoryRecord(const CategoryPtr& category,
{
using namespace boost::numeric;
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
const std::string& categoryName = category->m_Name;
- BOOST_ASSERT(!categoryName.empty());
+ ARMNN_ASSERT(!categoryName.empty());
// Remove any duplicate counters
std::vector<uint16_t> categoryCounters;
@@ -299,13 +300,13 @@ bool SendCounterPacket::CreateDeviceRecord(const DevicePtr& device,
DeviceRecord& deviceRecord,
std::string& errorMessage)
{
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
uint16_t deviceUid = device->m_Uid;
const std::string& deviceName = device->m_Name;
uint16_t deviceCores = device->m_Cores;
- BOOST_ASSERT(!deviceName.empty());
+ ARMNN_ASSERT(!deviceName.empty());
// Device record word 0:
// 16:31 [16] uid: the unique identifier for the device
@@ -349,13 +350,13 @@ bool SendCounterPacket::CreateCounterSetRecord(const CounterSetPtr& counterSet,
CounterSetRecord& counterSetRecord,
std::string& errorMessage)
{
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
uint16_t counterSetUid = counterSet->m_Uid;
const std::string& counterSetName = counterSet->m_Name;
uint16_t counterSetCount = counterSet->m_Count;
- BOOST_ASSERT(!counterSetName.empty());
+ ARMNN_ASSERT(!counterSetName.empty());
// Counter set record word 0:
// 16:31 [16] uid: the unique identifier for the counter_set
@@ -402,7 +403,7 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
{
using namespace boost::numeric;
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
uint16_t counterUid = counter->m_Uid;
uint16_t maxCounterUid = counter->m_MaxCounterUid;
@@ -415,9 +416,9 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
const std::string& counterDescription = counter->m_Description;
const std::string& counterUnits = counter->m_Units;
- BOOST_ASSERT(counterClass == 0 || counterClass == 1);
- BOOST_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
- BOOST_ASSERT(counterMultiplier);
+ ARMNN_ASSERT(counterClass == 0 || counterClass == 1);
+ ARMNN_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
+ ARMNN_ASSERT(counterMultiplier);
// Utils
size_t uint32_t_size = sizeof(uint32_t);
@@ -450,7 +451,7 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
// 0:63 [64] multiplier: internal data stream is represented as integer values, this allows scaling of
// those values as if they are fixed point numbers. Zero is not a valid value
uint32_t multiplier[2] = { 0u, 0u };
- BOOST_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
+ ARMNN_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
std::memcpy(multiplier, &counterMultiplier, sizeof(multiplier));
uint32_t eventRecordWord3 = multiplier[0];
uint32_t eventRecordWord4 = multiplier[1];
diff --git a/src/profiling/SendTimelinePacket.hpp b/src/profiling/SendTimelinePacket.hpp
index 3e52c9758b..9954bd9a04 100644
--- a/src/profiling/SendTimelinePacket.hpp
+++ b/src/profiling/SendTimelinePacket.hpp
@@ -9,7 +9,7 @@
#include "armnn/profiling/ISendTimelinePacket.hpp"
#include "ProfilingUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <memory>
@@ -78,7 +78,7 @@ void SendTimelinePacket::ForwardWriteBinaryFunction(Func& func, Params&& ... par
try
{
ReserveBuffer();
- BOOST_ASSERT(m_WriteBuffer);
+ ARMNN_ASSERT(m_WriteBuffer);
unsigned int numberOfBytesWritten = 0;
// Header will be prepended to the buffer on Commit()
while ( true )
diff --git a/src/profiling/test/ProfilingMocks.hpp b/src/profiling/test/ProfilingMocks.hpp
index ada55d8dff..2cd44c40a2 100644
--- a/src/profiling/test/ProfilingMocks.hpp
+++ b/src/profiling/test/ProfilingMocks.hpp
@@ -16,9 +16,9 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
@@ -449,11 +449,11 @@ public:
{
// Create the category
CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
// Get the raw category pointer
const Category* categoryPtr = category.get();
- BOOST_ASSERT(categoryPtr);
+ ARMNN_ASSERT(categoryPtr);
// Register the category
m_Categories.insert(std::move(category));
@@ -469,11 +469,11 @@ public:
// Create the device
DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the raw device pointer
const Device* devicePtr = device.get();
- BOOST_ASSERT(devicePtr);
+ ARMNN_ASSERT(devicePtr);
// Register the device
m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
@@ -490,11 +490,11 @@ public:
// Create the counter set
CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
// Get the raw counter set pointer
const CounterSet* counterSetPtr = counterSet.get();
- BOOST_ASSERT(counterSetPtr);
+ ARMNN_ASSERT(counterSetPtr);
// Register the counter set
m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
@@ -528,7 +528,7 @@ public:
// Get the counter UIDs and calculate the max counter UID
std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
- BOOST_ASSERT(!counterUids.empty());
+ ARMNN_ASSERT(!counterUids.empty());
uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
// Get the counter units
@@ -546,18 +546,18 @@ public:
unitsValue,
deviceUidValue,
counterSetUidValue);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Get the raw counter pointer
const Counter* counterPtr = counter.get();
- BOOST_ASSERT(counterPtr);
+ ARMNN_ASSERT(counterPtr);
// Process multiple counters if necessary
for (uint16_t counterUid : counterUids)
{
// Connect the counter to the parent category
Category* parentCategory = const_cast<Category*>(GetCategory(parentCategoryName));
- BOOST_ASSERT(parentCategory);
+ ARMNN_ASSERT(parentCategory);
parentCategory->m_Counters.push_back(counterUid);
// Register the counter
@@ -584,7 +584,7 @@ public:
{
auto it = std::find_if(m_Categories.begin(), m_Categories.end(), [&name](const CategoryPtr& category)
{
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category->m_Name == name;
});
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index 8de69f14ec..5c63b54b8f 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -31,7 +31,7 @@ void VerifyTimelineHeaderBinary(const unsigned char* readableData,
unsigned int& offset,
uint32_t packetDataLength)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
@@ -60,7 +60,7 @@ void VerifyTimelineLabelBinaryPacketData(Optional<ProfilingGuid> guid,
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
@@ -101,7 +101,7 @@ void VerifyTimelineEventClassBinaryPacketData(ProfilingGuid guid,
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
@@ -127,7 +127,7 @@ void VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType relati
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
uint32_t relationshipTypeUint = 0;
switch (relationshipType)
@@ -205,7 +205,7 @@ void VerifyTimelineEntityBinaryPacketData(Optional<ProfilingGuid> guid,
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
@@ -238,7 +238,7 @@ void VerifyTimelineEventBinaryPacket(Optional<uint64_t> timestamp,
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
diff --git a/src/profiling/test/SendCounterPacketTests.cpp b/src/profiling/test/SendCounterPacketTests.cpp
index 51f049ddc6..a3c237faba 100644
--- a/src/profiling/test/SendCounterPacketTests.cpp
+++ b/src/profiling/test/SendCounterPacketTests.cpp
@@ -536,7 +536,7 @@ BOOST_AUTO_TEST_CASE(CreateEventRecordTest)
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
@@ -656,7 +656,7 @@ BOOST_AUTO_TEST_CASE(CreateEventRecordNoUnitsTest)
"",
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
@@ -761,7 +761,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest1)
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
@@ -800,7 +800,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest2)
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
@@ -839,7 +839,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest3)
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
@@ -859,7 +859,7 @@ BOOST_AUTO_TEST_CASE(CreateCategoryRecordTest)
// Create a category for testing
const std::string categoryName = "some_category";
const CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
category->m_Counters = { 11u, 23u, 5670u };
// Create a collection of counters
@@ -903,9 +903,9 @@ BOOST_AUTO_TEST_CASE(CreateCategoryRecordTest)
Counter* counter1 = counters.find(11)->second.get();
Counter* counter2 = counters.find(23)->second.get();
Counter* counter3 = counters.find(5670)->second.get();
- BOOST_ASSERT(counter1);
- BOOST_ASSERT(counter2);
- BOOST_ASSERT(counter3);
+ ARMNN_ASSERT(counter1);
+ ARMNN_ASSERT(counter2);
+ ARMNN_ASSERT(counter3);
uint16_t categoryEventCount = boost::numeric_cast<uint16_t>(counters.size());
// Create a category record
diff --git a/src/profiling/test/SendCounterPacketTests.hpp b/src/profiling/test/SendCounterPacketTests.hpp
index 7a5f7962e6..84c88ad9ae 100644
--- a/src/profiling/test/SendCounterPacketTests.hpp
+++ b/src/profiling/test/SendCounterPacketTests.hpp
@@ -13,9 +13,9 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
diff --git a/tests/CaffePreprocessor.cpp b/tests/CaffePreprocessor.cpp
index 6adc75dc64..7e7028966c 100644
--- a/tests/CaffePreprocessor.cpp
+++ b/tests/CaffePreprocessor.cpp
@@ -6,7 +6,6 @@
#include "CaffePreprocessor.hpp"
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <iostream>
diff --git a/tests/DeepSpeechV1InferenceTest.hpp b/tests/DeepSpeechV1InferenceTest.hpp
index 07b55d2ab8..7a33d34ace 100644
--- a/tests/DeepSpeechV1InferenceTest.hpp
+++ b/tests/DeepSpeechV1InferenceTest.hpp
@@ -7,9 +7,9 @@
#include "InferenceTest.hpp"
#include "DeepSpeechV1Database.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -40,13 +40,13 @@ public:
{
armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
- BOOST_ASSERT(output1.size() == k_OutputSize1);
+ ARMNN_ASSERT(output1.size() == k_OutputSize1);
const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c
- BOOST_ASSERT(output2.size() == k_OutputSize2);
+ ARMNN_ASSERT(output2.size() == k_OutputSize2);
const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h
- BOOST_ASSERT(output3.size() == k_OutputSize3);
+ ARMNN_ASSERT(output3.size() == k_OutputSize3);
// Check each output to see whether it is the expected value
for (unsigned int j = 0u; j < output1.size(); j++)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index a59f58074b..9252a463cb 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -127,7 +127,7 @@ int main(int argc, const char* argv[])
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
return EXIT_FAILURE;
}
diff --git a/tests/ImagePreprocessor.cpp b/tests/ImagePreprocessor.cpp
index f0184e466e..5a42b8ae31 100644
--- a/tests/ImagePreprocessor.cpp
+++ b/tests/ImagePreprocessor.cpp
@@ -11,7 +11,6 @@
#include <armnnUtils/Permute.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <iostream>
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 0529770b85..af931f99f8 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -7,6 +7,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
@@ -179,7 +180,7 @@ public:
std::vector<armnn::BindingPointInfo>& outputBindings)
{
auto parser(IParser::Create());
- BOOST_ASSERT(parser);
+ ARMNN_ASSERT(parser);
armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
diff --git a/tests/InferenceTest.cpp b/tests/InferenceTest.cpp
index c6e5011ae4..7e165b5137 100644
--- a/tests/InferenceTest.cpp
+++ b/tests/InferenceTest.cpp
@@ -4,11 +4,12 @@
//
#include "InferenceTest.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include "../src/armnn/Profiling.hpp"
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
@@ -55,7 +56,7 @@ bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCas
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return false;
}
@@ -228,7 +229,7 @@ bool InferenceTest(const InferenceTestOptions& params,
success = false;
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected TestCaseResult");
+ ARMNN_ASSERT_MSG(false, "Unexpected TestCaseResult");
return false;
}
}
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 5b9b45a4a2..ed16464787 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -4,10 +4,10 @@
//
#include "InferenceTest.hpp"
+#include <armnn/utility/Assert.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
@@ -80,7 +80,7 @@ struct ClassifierResultProcessor : public boost::static_visitor<>
void operator()(const std::vector<int>& values)
{
IgnoreUnused(values);
- BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
+ ARMNN_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
ResultMap& GetResultMap() { return m_ResultMap; }
@@ -360,9 +360,9 @@ int ClassifierInferenceTestMain(int argc,
const armnn::TensorShape* inputTensorShape)
{
- BOOST_ASSERT(modelFilename);
- BOOST_ASSERT(inputBindingName);
- BOOST_ASSERT(outputBindingName);
+ ARMNN_ASSERT(modelFilename);
+ ARMNN_ASSERT(inputBindingName);
+ ARMNN_ASSERT(outputBindingName);
return InferenceTestMain(argc, argv, defaultTestCaseIds,
[=]
diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp
index 83c5cce346..1cf73caf45 100644
--- a/tests/InferenceTestImage.cpp
+++ b/tests/InferenceTestImage.cpp
@@ -4,6 +4,7 @@
//
#include "InferenceTestImage.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
@@ -165,7 +166,7 @@ std::tuple<uint8_t, uint8_t, uint8_t> InferenceTestImage::GetPixelAs3Channels(un
const unsigned int pixelOffset = x * GetNumChannels() + y * GetWidth() * GetNumChannels();
const uint8_t* const pixelData = m_Data.data() + pixelOffset;
- BOOST_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
+ ARMNN_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
std::array<uint8_t, 3> outPixelData;
outPixelData.fill(0);
diff --git a/tests/MnistDatabase.cpp b/tests/MnistDatabase.cpp
index bd5029f841..c1c5f635b6 100644
--- a/tests/MnistDatabase.cpp
+++ b/tests/MnistDatabase.cpp
@@ -7,7 +7,7 @@
#include <armnn/Logging.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
+
#include <fstream>
#include <vector>
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index a26712c511..e02a4acedd 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -7,9 +7,9 @@
#include "InferenceTest.hpp"
#include "MobileNetSsdDatabase.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -38,16 +38,16 @@ public:
armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
- BOOST_ASSERT(output1.size() == k_OutputSize1);
+ ARMNN_ASSERT(output1.size() == k_OutputSize1);
const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes
- BOOST_ASSERT(output2.size() == k_OutputSize2);
+ ARMNN_ASSERT(output2.size() == k_OutputSize2);
const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores
- BOOST_ASSERT(output3.size() == k_OutputSize3);
+ ARMNN_ASSERT(output3.size() == k_OutputSize3);
const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections
- BOOST_ASSERT(output4.size() == k_OutputSize4);
+ ARMNN_ASSERT(output4.size() == k_OutputSize4);
const size_t numDetections = boost::numeric_cast<size_t>(output4[0]);
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index ecfc21209c..dd1c295d37 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -109,7 +109,7 @@ int main(int argc, char* argv[])
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return 1;
}
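
A note on the ARMNN_ASSERT_MSG(false, "...") pattern above: with an assert-based wrapper the message is a string literal combined with the condition via &&, so the macro must parenthesise the condition, or multi-clause conditions parse differently and trip -Wparentheses. A hedged sketch of the pitfall (these macro names are hypothetical, for illustration only):

    #include <cassert>

    // Naive form: COND is spliced directly against &&.
    #define ASSERT_MSG_NAIVE(COND, MSG) assert(COND && MSG)

    // Safer form: (COND) keeps precedence intact for conditions like a || b.
    #define ASSERT_MSG_SAFE(COND, MSG)  assert((COND) && MSG)

    void Check(int a, int b)
    {
        // Expands to assert(a == 1 || b == 2 && "..."); gcc warns under
        // -Wparentheses because && binds tighter than ||.
        ASSERT_MSG_NAIVE(a == 1 || b == 2, "a or b out of range");

        // Expands to assert((a == 1 || b == 2) && "..."): unambiguous.
        ASSERT_MSG_SAFE(a == 1 || b == 2, "a or b out of range");
    }

Because a string literal is always truthy, (COND) && MSG fires exactly when COND fails, and the literal still appears in the printed assertion on failure.
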
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 5c969c68dd..0e72f7bc1e 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -59,7 +59,7 @@ int main(int argc, char* argv[])
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return 1;
}
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index a0aeb8bc5a..278ba1b46a 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -824,7 +824,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
return EXIT_FAILURE;
}
diff --git a/tests/YoloDatabase.cpp b/tests/YoloDatabase.cpp
index 98db8d4871..3b3e5a90d4 100644
--- a/tests/YoloDatabase.cpp
+++ b/tests/YoloDatabase.cpp
@@ -12,7 +12,6 @@
#include <tuple>
#include <utility>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 4190e72365..6c783d3c48 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -7,13 +7,13 @@
#include "InferenceTest.hpp"
#include "YoloDatabase.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <algorithm>
#include <array>
#include <utility>
-#include <boost/assert.hpp>
#include <boost/multi_array.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -39,7 +39,7 @@ public:
using Boost3dArray = boost::multi_array<float, 3>;
const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);
- BOOST_ASSERT(output.size() == YoloOutputSize);
+ ARMNN_ASSERT(output.size() == YoloOutputSize);
constexpr Boost3dArray::index gridSize = 7;
constexpr Boost3dArray::index numClasses = 20;
@@ -96,7 +96,7 @@ public:
}
}
}
- BOOST_ASSERT(output.data() + YoloOutputSize == outputPtr);
+ ARMNN_ASSERT(output.data() + YoloOutputSize == outputPtr);
std::vector<YoloDetectedObject> detectedObjects;
detectedObjects.reserve(gridSize * gridSize * numScales * numClasses);
diff --git a/tests/profiling/gatordmock/GatordMockService.cpp b/tests/profiling/gatordmock/GatordMockService.cpp
index 3e19c25b6c..aad335dc4d 100644
--- a/tests/profiling/gatordmock/GatordMockService.cpp
+++ b/tests/profiling/gatordmock/GatordMockService.cpp
@@ -362,7 +362,7 @@ armnn::profiling::Packet GatordMockService::ReceivePacket()
profiling::CommandHandlerFunctor* commandHandlerFunctor =
m_HandlerRegistry.GetFunctor(packetRx.GetPacketFamily(), packetRx.GetPacketId(), version.GetEncodedValue());
- BOOST_ASSERT(commandHandlerFunctor);
+ ARMNN_ASSERT(commandHandlerFunctor);
commandHandlerFunctor->operator()(packetRx);
return packetRx;
}
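
The GetFunctor/ARMNN_ASSERT pairing above is a fail-fast lookup: assert that the handler exists before invoking it, rather than dereferencing null. A stripped-down sketch of that pattern, assuming a simple registry keyed by packet id (the names are illustrative, not the actual GatordMockService API):

    #include <armnn/utility/Assert.hpp>

    #include <functional>
    #include <map>

    using Handler = std::function<void(int /*packetId*/)>;

    std::map<int, Handler> g_Registry;

    void Dispatch(int packetId)
    {
        auto it = g_Registry.find(packetId);
        Handler* handler = (it != g_Registry.end()) ? &it->second : nullptr;
        // Fail fast in debug builds instead of invoking a missing handler.
        ARMNN_ASSERT(handler);
        (*handler)(packetId);
    }
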
diff --git a/tests/profiling/gatordmock/tests/GatordMockTests.cpp b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
index 7417946844..f8b42df674 100644
--- a/tests/profiling/gatordmock/tests/GatordMockTests.cpp
+++ b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
@@ -98,11 +98,11 @@ BOOST_AUTO_TEST_CASE(CounterCaptureHandlingTest)
commandHandler(packet1);
commandHandler(packet2);
- BOOST_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
+ ARMNN_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
for (size_t i = 0; i < commandHandler.m_CounterCaptureValues.m_Uids.size(); ++i)
{
- BOOST_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
+ ARMNN_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
}
}