diff options
Diffstat (limited to 'src/backends/neon')
8 files changed, 17 insertions, 16 deletions
diff --git a/src/backends/neon/NeonInterceptorScheduler.cpp b/src/backends/neon/NeonInterceptorScheduler.cpp index d8dd01bd6c..745c5fde62 100644 --- a/src/backends/neon/NeonInterceptorScheduler.cpp +++ b/src/backends/neon/NeonInterceptorScheduler.cpp @@ -5,8 +5,6 @@ #include "NeonInterceptorScheduler.hpp" -#include <boost/assert.hpp> - namespace armnn{ NeonInterceptorScheduler::NeonInterceptorScheduler(arm_compute::IScheduler &realScheduler) diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp index 11d20878d7..fb2c2b5128 100644 --- a/src/backends/neon/NeonTensorHandle.hpp +++ b/src/backends/neon/NeonTensorHandle.hpp @@ -7,6 +7,8 @@ #include <BFloat16.hpp> #include <Half.hpp> +#include <armnn/utility/Assert.hpp> + #include <aclCommon/ArmComputeTensorHandle.hpp> #include <aclCommon/ArmComputeTensorUtils.hpp> @@ -61,7 +63,7 @@ public: // If we have enabled Importing, don't manage the tensor if (!m_IsImportEnabled) { - BOOST_ASSERT(m_MemoryGroup != nullptr); + ARMNN_ASSERT(m_MemoryGroup != nullptr); m_MemoryGroup->manage(&m_Tensor); } } diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp index 219edc9680..1079a0d57c 100644 --- a/src/backends/neon/NeonTimer.cpp +++ b/src/backends/neon/NeonTimer.cpp @@ -6,9 +6,10 @@ #include "NeonTimer.hpp" #include "NeonInterceptorScheduler.hpp" +#include <armnn/utility/Assert.hpp> + #include <memory> -#include <boost/assert.hpp> #include <boost/format.hpp> namespace armnn @@ -21,7 +22,7 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul void NeonTimer::Start() { m_Kernels.clear(); - BOOST_ASSERT(g_Interceptor->GetKernels() == nullptr); + ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr); g_Interceptor->SetKernels(&m_Kernels); m_RealSchedulerType = arm_compute::Scheduler::get_type(); diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp index 83a2692b6e..b9cb807779 
100644 --- a/src/backends/neon/workloads/NeonConstantWorkload.cpp +++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp @@ -39,7 +39,7 @@ void NeonConstantWorkload::Execute() const { const ConstantQueueDescriptor& data = this->m_Data; - BOOST_ASSERT(data.m_LayerOutput != nullptr); + ARMNN_ASSERT(data.m_LayerOutput != nullptr); arm_compute::ITensor& output = boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor(); arm_compute::DataType computeDataType = @@ -69,7 +69,7 @@ void NeonConstantWorkload::Execute() const } default: { - BOOST_ASSERT_MSG(false, "Unknown data type"); + ARMNN_ASSERT_MSG(false, "Unknown data type"); break; } } diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp index 683decd45c..5d45642eef 100644 --- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp @@ -37,7 +37,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input, if (descriptor.m_BiasEnabled) { - BOOST_ASSERT(biases.has_value()); + ARMNN_ASSERT(biases.has_value()); aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout); optionalAclBiasesInfo = &aclBiasesInfo; @@ -97,7 +97,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload( m_ConvolutionLayer.reset(convolutionLayer.release()); - BOOST_ASSERT(m_ConvolutionLayer); + ARMNN_ASSERT(m_ConvolutionLayer); InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight); diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp index e39fe54199..a9a3c75bfd 100644 --- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp +++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp @@ -49,7 +49,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i if 
(descriptor.m_BiasEnabled) { - BOOST_ASSERT(biases.has_value()); + ARMNN_ASSERT(biases.has_value()); aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout); optionalAclBiasesInfo = &aclBiasesInfo; @@ -127,7 +127,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload( arm_compute::ActivationLayerInfo(), aclDilationInfo); - BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + ARMNN_ASSERT(m_pDepthwiseConvolutionLayer); ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted); InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle); @@ -144,7 +144,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload( void NeonDepthwiseConvolutionWorkload::Execute() const { ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute"); - BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + ARMNN_ASSERT(m_pDepthwiseConvolutionLayer); m_pDepthwiseConvolutionLayer->run(); } diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp index c62f71948c..ffca2076fe 100644 --- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp +++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp @@ -38,7 +38,7 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo& if (descriptor.m_BiasEnabled) { - BOOST_ASSERT(biases.has_value()); + ARMNN_ASSERT(biases.has_value()); aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout); optionalAclBiasesInfo = &aclBiasesInfo; @@ -81,7 +81,7 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload( m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager); m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo); - BOOST_ASSERT(m_Layer); + ARMNN_ASSERT(m_Layer); InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight); diff --git 
a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp index 3f0fe842aa..c3c9d3dbbc 100644 --- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp +++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp @@ -35,7 +35,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData) inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor, const ConstCpuTensorHandle* handle) { - BOOST_ASSERT(handle); + ARMNN_ASSERT(handle); switch(handle->GetTensorInfo().GetDataType()) { @@ -59,7 +59,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor, CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>()); break; default: - BOOST_ASSERT_MSG(false, "Unexpected tensor type."); + ARMNN_ASSERT_MSG(false, "Unexpected tensor type."); } };