author    Georgios Pinitas <georgios.pinitas@arm.com>  2020-11-21 03:04:18 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2020-12-01 10:41:54 +0000
commit    40f51a63c8e7258db15269427ae4fe1ad199c550 (patch)
tree      353253a41863966995a45556731e7181a643c003
parent    327800401c4185d98fcc01b9c9efbc038a4228ed (diff)
download  ComputeLibrary-40f51a63c8e7258db15269427ae4fe1ad199c550.tar.gz
Update default C++ standard to C++14
(3RDPARTY_UPDATE)

Resolves: COMPMID-3849
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I6369f112337310140e2d6c8e79630cd11138dfa0
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4544
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
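The bulk of this patch is a mechanical migration: call sites that used the library's C++11-era backport support::cpp14::make_unique now call std::make_unique, which C++14 provides directly. A minimal sketch of the before/after pattern follows (Widget is an illustrative placeholder, not a ComputeLibrary type):

    // builds with -std=c++14
    #include <memory>

    struct Widget
    {
        int value{ 0 };
    };

    int main()
    {
        // Before (C++11 builds): auto w = arm_compute::support::cpp14::make_unique<Widget>();
        auto w = std::make_unique<Widget>(); // After: the standard C++14 facility
        return w->value;
    }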
m---------3rdparty0
-rw-r--r--SConstruct2
-rw-r--r--arm_compute/core/utils/logging/Helpers.h3
-rw-r--r--arm_compute/core/utils/logging/Macros.h4
-rw-r--r--arm_compute/graph/Graph.h2
-rw-r--r--arm_compute/graph/TensorDescriptor.h3
-rw-r--r--arm_compute/graph/backends/BackendRegistry.h3
-rw-r--r--arm_compute/graph/backends/FunctionHelpers.h66
-rw-r--r--arm_compute/graph/backends/Utils.h4
-rw-r--r--arm_compute/graph/frontend/Layers.h32
-rw-r--r--arm_compute/runtime/Array.h3
-rw-r--r--arm_compute/runtime/CL/tuners/CLLWSList.h8
-rw-r--r--arm_compute/runtime/CL/tuners/Tuners.h6
-rw-r--r--arm_compute/runtime/MemoryRegion.h5
-rw-r--r--arm_compute/runtime/NEON/functions/NEQLSTMLayer.h3
-rw-r--r--docs/00_introduction.dox47
-rw-r--r--docs/05_contribution_guidelines.dox2
-rw-r--r--docs/Doxyfile2
-rw-r--r--examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp2
-rw-r--r--examples/graph_alexnet.cpp2
-rw-r--r--examples/graph_googlenet.cpp2
-rw-r--r--examples/graph_inception_resnet_v1.cpp4
-rw-r--r--examples/graph_inception_resnet_v2.cpp2
-rw-r--r--examples/graph_inception_v3.cpp2
-rw-r--r--examples/graph_inception_v4.cpp2
-rw-r--r--examples/graph_mobilenet.cpp2
-rw-r--r--examples/graph_mobilenet_v2.cpp2
-rw-r--r--examples/graph_resnet12.cpp4
-rw-r--r--examples/graph_resnet50.cpp4
-rw-r--r--examples/graph_resnet_v2_50.cpp2
-rw-r--r--examples/graph_shufflenet.cpp2
-rw-r--r--examples/graph_squeezenet.cpp2
-rw-r--r--examples/graph_squeezenet_v1_1.cpp2
-rw-r--r--examples/graph_srcnn955.cpp4
-rw-r--r--examples/graph_ssd_mobilenet.cpp2
-rw-r--r--examples/graph_vgg16.cpp2
-rw-r--r--examples/graph_vgg19.cpp2
-rw-r--r--examples/graph_vgg_vdsr.cpp4
-rw-r--r--examples/graph_yolov3.cpp4
-rw-r--r--examples/neon_cnn.cpp12
-rwxr-xr-xscripts/clang_tidy_rules.py2
-rw-r--r--src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h8
-rw-r--r--src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h6
-rw-r--r--src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h6
-rw-r--r--src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp10
-rw-r--r--src/core/TensorInfo.cpp5
-rw-r--r--src/core/utils/logging/Logger.cpp9
-rw-r--r--src/graph/Graph.cpp4
-rw-r--r--src/graph/Utils.cpp14
-rw-r--r--src/graph/backends/CL/CLDeviceBackend.cpp6
-rw-r--r--src/graph/backends/CL/CLFunctionsFactory.cpp8
-rw-r--r--src/graph/backends/GLES/GCDeviceBackend.cpp2
-rw-r--r--src/graph/backends/NEON/NEDeviceBackend.cpp4
-rw-r--r--src/graph/backends/NEON/NEFunctionFactory.cpp2
-rw-r--r--src/graph/mutators/SyntheticDataTypeMutator.cpp2
-rw-r--r--src/runtime/Allocator.cpp3
-rw-r--r--src/runtime/BlobLifetimeManager.cpp3
-rw-r--r--src/runtime/BlobMemoryPool.cpp3
-rw-r--r--src/runtime/CL/CLBufferAllocator.cpp3
-rw-r--r--src/runtime/CL/CLRuntimeContext.cpp4
-rw-r--r--src/runtime/CL/CLTensorAllocator.cpp18
-rw-r--r--src/runtime/CL/ICLSimpleFunction.cpp3
-rw-r--r--src/runtime/CL/functions/CLAbsoluteDifference.cpp3
-rw-r--r--src/runtime/CL/functions/CLAccumulate.cpp7
-rw-r--r--src/runtime/CL/functions/CLActivationLayer.cpp7
-rw-r--r--src/runtime/CL/functions/CLArgMinMaxLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLBatchNormalizationLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLBatchToSpaceLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLBitwiseAnd.cpp3
-rw-r--r--src/runtime/CL/functions/CLBitwiseNot.cpp3
-rw-r--r--src/runtime/CL/functions/CLBitwiseOr.cpp3
-rw-r--r--src/runtime/CL/functions/CLBitwiseXor.cpp3
-rw-r--r--src/runtime/CL/functions/CLBoundingBoxTransform.cpp3
-rw-r--r--src/runtime/CL/functions/CLBox3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLCannyEdge.cpp15
-rw-r--r--src/runtime/CL/functions/CLCast.cpp3
-rw-r--r--src/runtime/CL/functions/CLChannelCombine.cpp5
-rw-r--r--src/runtime/CL/functions/CLChannelExtract.cpp5
-rw-r--r--src/runtime/CL/functions/CLChannelShuffleLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLColorConvert.cpp9
-rw-r--r--src/runtime/CL/functions/CLComparison.cpp5
-rw-r--r--src/runtime/CL/functions/CLComputeAllAnchors.cpp4
-rw-r--r--src/runtime/CL/functions/CLConcatenateLayer.cpp17
-rw-r--r--src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp4
-rw-r--r--src/runtime/CL/functions/CLConvolution.cpp10
-rw-r--r--src/runtime/CL/functions/CLConvolutionLayer.cpp9
-rw-r--r--src/runtime/CL/functions/CLCopy.cpp3
-rw-r--r--src/runtime/CL/functions/CLCropResize.cpp20
-rw-r--r--src/runtime/CL/functions/CLDeconvolutionLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp5
-rw-r--r--src/runtime/CL/functions/CLDepthConvertLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLDepthToSpaceLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp13
-rw-r--r--src/runtime/CL/functions/CLDequantizationLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLDerivative.cpp3
-rw-r--r--src/runtime/CL/functions/CLDilate.cpp3
-rw-r--r--src/runtime/CL/functions/CLDirectConvolutionLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp43
-rw-r--r--src/runtime/CL/functions/CLElementwiseOperations.cpp43
-rw-r--r--src/runtime/CL/functions/CLEqualizeHistogram.cpp7
-rw-r--r--src/runtime/CL/functions/CLErode.cpp3
-rw-r--r--src/runtime/CL/functions/CLFFT1D.cpp7
-rw-r--r--src/runtime/CL/functions/CLFFTConvolutionLayer.cpp4
-rw-r--r--src/runtime/CL/functions/CLFastCorners.cpp5
-rw-r--r--src/runtime/CL/functions/CLFill.cpp4
-rw-r--r--src/runtime/CL/functions/CLFillBorder.cpp3
-rw-r--r--src/runtime/CL/functions/CLFlattenLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLFloor.cpp3
-rw-r--r--src/runtime/CL/functions/CLFullyConnectedLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLFuseBatchNormalization.cpp3
-rw-r--r--src/runtime/CL/functions/CLGEMM.cpp18
-rw-r--r--src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp9
-rw-r--r--src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp17
-rw-r--r--src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp13
-rw-r--r--src/runtime/CL/functions/CLGather.cpp3
-rw-r--r--src/runtime/CL/functions/CLGaussian3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLGaussian5x5.cpp7
-rw-r--r--src/runtime/CL/functions/CLGaussianPyramid.cpp11
-rw-r--r--src/runtime/CL/functions/CLGenerateProposalsLayer.cpp17
-rw-r--r--src/runtime/CL/functions/CLHOGDescriptor.cpp5
-rw-r--r--src/runtime/CL/functions/CLHOGDetector.cpp3
-rw-r--r--src/runtime/CL/functions/CLHOGGradient.cpp3
-rw-r--r--src/runtime/CL/functions/CLHOGMultiDetection.cpp5
-rw-r--r--src/runtime/CL/functions/CLHarrisCorners.cpp13
-rw-r--r--src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLIntegralImage.cpp5
-rw-r--r--src/runtime/CL/functions/CLL2NormalizeLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLLSTMLayer.cpp7
-rw-r--r--src/runtime/CL/functions/CLLocallyConnectedLayer.cpp9
-rw-r--r--src/runtime/CL/functions/CLLogicalAnd.cpp7
-rw-r--r--src/runtime/CL/functions/CLLogicalNot.cpp7
-rw-r--r--src/runtime/CL/functions/CLLogicalOr.cpp7
-rw-r--r--src/runtime/CL/functions/CLMagnitude.cpp3
-rw-r--r--src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLMeanStdDev.cpp5
-rw-r--r--src/runtime/CL/functions/CLMeanStdDevNormalizationLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLMedian3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLMinMaxLocation.cpp5
-rw-r--r--src/runtime/CL/functions/CLNonLinearFilter.cpp3
-rw-r--r--src/runtime/CL/functions/CLNonMaximaSuppression3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLNormalizationLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLNormalizePlanarYUVLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLOpticalFlow.cpp17
-rw-r--r--src/runtime/CL/functions/CLPReluLayer.cpp7
-rw-r--r--src/runtime/CL/functions/CLPadLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLPermute.cpp3
-rw-r--r--src/runtime/CL/functions/CLPhase.cpp3
-rw-r--r--src/runtime/CL/functions/CLPixelWiseMultiplication.cpp17
-rw-r--r--src/runtime/CL/functions/CLPoolingLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLPriorBoxLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLQLSTMLayer.cpp23
-rw-r--r--src/runtime/CL/functions/CLQuantizationLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLRNNLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLROIAlignLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLROIPoolingLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLRange.cpp3
-rw-r--r--src/runtime/CL/functions/CLReductionOperation.cpp15
-rw-r--r--src/runtime/CL/functions/CLRemap.cpp3
-rw-r--r--src/runtime/CL/functions/CLReorgLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLReshapeLayer.cpp7
-rw-r--r--src/runtime/CL/functions/CLReverse.cpp3
-rw-r--r--src/runtime/CL/functions/CLScale.cpp3
-rw-r--r--src/runtime/CL/functions/CLScharr3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLSelect.cpp4
-rw-r--r--src/runtime/CL/functions/CLSlice.cpp7
-rw-r--r--src/runtime/CL/functions/CLSobel3x3.cpp3
-rw-r--r--src/runtime/CL/functions/CLSobel5x5.cpp7
-rw-r--r--src/runtime/CL/functions/CLSobel7x7.cpp7
-rw-r--r--src/runtime/CL/functions/CLSoftmaxLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLSpaceToBatchLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLSpaceToDepthLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLStackLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLStridedSlice.cpp7
-rw-r--r--src/runtime/CL/functions/CLTableLookup.cpp3
-rw-r--r--src/runtime/CL/functions/CLThreshold.cpp3
-rw-r--r--src/runtime/CL/functions/CLTile.cpp3
-rw-r--r--src/runtime/CL/functions/CLTranspose.cpp3
-rw-r--r--src/runtime/CL/functions/CLUpsampleLayer.cpp3
-rw-r--r--src/runtime/CL/functions/CLWarpAffine.cpp3
-rw-r--r--src/runtime/CL/functions/CLWarpPerspective.cpp3
-rw-r--r--src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp5
-rw-r--r--src/runtime/CL/functions/CLWinogradInputTransform.cpp3
-rw-r--r--src/runtime/CL/functions/CLYOLOLayer.cpp3
-rw-r--r--src/runtime/CL/gemm/CLGEMMKernelSelection.h8
-rw-r--r--src/runtime/CPP/CPPScheduler.cpp4
-rw-r--r--src/runtime/CPP/functions/CPPNonMaximumSuppression.cpp3
-rw-r--r--src/runtime/CPP/functions/CPPPermute.cpp3
-rw-r--r--src/runtime/CPP/functions/CPPTopKV.cpp3
-rw-r--r--src/runtime/CPP/functions/CPPUpsample.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp4
-rw-r--r--src/runtime/GLES_COMPUTE/GCRuntimeContext.cpp4
-rw-r--r--src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCActivationLayer.cpp3
-rwxr-xr-xsrc/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp4
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp7
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCFillBorder.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCGEMMTranspose1xW.cpp3
-rwxr-xr-xsrc/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCPoolingLayer.cpp4
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCScale.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCTensorShift.cpp3
-rw-r--r--src/runtime/GLES_COMPUTE/functions/GCTranspose.cpp3
-rw-r--r--src/runtime/NEON/functions/NEAbsoluteDifference.cpp3
-rw-r--r--src/runtime/NEON/functions/NEAccumulate.cpp9
-rw-r--r--src/runtime/NEON/functions/NEActivationLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NEArgMinMaxLayer.cpp4
-rw-r--r--src/runtime/NEON/functions/NEArithmeticAddition.cpp7
-rw-r--r--src/runtime/NEON/functions/NEArithmeticSubtraction.cpp7
-rw-r--r--src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp4
-rw-r--r--src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp6
-rw-r--r--src/runtime/NEON/functions/NEBitwiseAnd.cpp3
-rw-r--r--src/runtime/NEON/functions/NEBitwiseNot.cpp3
-rw-r--r--src/runtime/NEON/functions/NEBitwiseOr.cpp3
-rw-r--r--src/runtime/NEON/functions/NEBitwiseXor.cpp3
-rw-r--r--src/runtime/NEON/functions/NEBoundingBoxTransform.cpp4
-rw-r--r--src/runtime/NEON/functions/NEBox3x3.cpp7
-rw-r--r--src/runtime/NEON/functions/NECannyEdge.cpp17
-rw-r--r--src/runtime/NEON/functions/NECast.cpp3
-rw-r--r--src/runtime/NEON/functions/NEChannelCombine.cpp5
-rw-r--r--src/runtime/NEON/functions/NEChannelExtract.cpp5
-rw-r--r--src/runtime/NEON/functions/NEChannelShuffleLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NECol2Im.cpp3
-rw-r--r--src/runtime/NEON/functions/NEColorConvert.cpp9
-rw-r--r--src/runtime/NEON/functions/NEComputeAllAnchors.cpp3
-rw-r--r--src/runtime/NEON/functions/NEConcatenateLayer.cpp13
-rw-r--r--src/runtime/NEON/functions/NEConvertFullyConnectedWeights.cpp3
-rw-r--r--src/runtime/NEON/functions/NEConvolution.cpp17
-rw-r--r--src/runtime/NEON/functions/NEConvolutionLayer.cpp12
-rw-r--r--src/runtime/NEON/functions/NECopy.cpp3
-rw-r--r--src/runtime/NEON/functions/NECropResize.cpp10
-rw-r--r--src/runtime/NEON/functions/NEDepthConvertLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEDepthToSpaceLayer.cpp4
-rw-r--r--src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEDequantizationLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEDerivative.cpp5
-rw-r--r--src/runtime/NEON/functions/NEDilate.cpp5
-rw-r--r--src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NEElementwiseOperators.cpp43
-rw-r--r--src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp15
-rw-r--r--src/runtime/NEON/functions/NEEqualizeHistogram.cpp7
-rw-r--r--src/runtime/NEON/functions/NEErode.cpp5
-rw-r--r--src/runtime/NEON/functions/NEFFT1D.cpp7
-rw-r--r--src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp4
-rw-r--r--src/runtime/NEON/functions/NEFastCorners.cpp9
-rw-r--r--src/runtime/NEON/functions/NEFill.cpp3
-rw-r--r--src/runtime/NEON/functions/NEFillBorder.cpp3
-rw-r--r--src/runtime/NEON/functions/NEFlattenLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEFloor.cpp3
-rw-r--r--src/runtime/NEON/functions/NEFullyConnectedLayer.cpp6
-rw-r--r--src/runtime/NEON/functions/NEFuseBatchNormalization.cpp3
-rw-r--r--src/runtime/NEON/functions/NEGEMM.cpp9
-rw-r--r--src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp8
-rw-r--r--src/runtime/NEON/functions/NEGEMMConv2d.cpp2
-rw-r--r--src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NEGEMMInterleave4x4.cpp3
-rw-r--r--src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp22
-rw-r--r--src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp15
-rw-r--r--src/runtime/NEON/functions/NEGEMMTranspose1xW.cpp3
-rw-r--r--src/runtime/NEON/functions/NEGather.cpp3
-rw-r--r--src/runtime/NEON/functions/NEGaussian3x3.cpp5
-rw-r--r--src/runtime/NEON/functions/NEGaussian5x5.cpp7
-rw-r--r--src/runtime/NEON/functions/NEGaussianPyramid.cpp9
-rw-r--r--src/runtime/NEON/functions/NEHOGDescriptor.cpp5
-rw-r--r--src/runtime/NEON/functions/NEHOGDetector.cpp3
-rw-r--r--src/runtime/NEON/functions/NEHOGGradient.cpp5
-rw-r--r--src/runtime/NEON/functions/NEHarrisCorners.cpp17
-rw-r--r--src/runtime/NEON/functions/NEHistogram.cpp3
-rw-r--r--src/runtime/NEON/functions/NEIm2Col.cpp3
-rw-r--r--src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEIntegralImage.cpp5
-rw-r--r--src/runtime/NEON/functions/NEL2NormalizeLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NELocallyConnectedLayer.cpp5
-rw-r--r--src/runtime/NEON/functions/NELogical.cpp13
-rw-r--r--src/runtime/NEON/functions/NEMagnitude.cpp5
-rw-r--r--src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp5
-rw-r--r--src/runtime/NEON/functions/NEMeanStdDev.cpp5
-rw-r--r--src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEMedian3x3.cpp5
-rw-r--r--src/runtime/NEON/functions/NEMinMaxLocation.cpp5
-rw-r--r--src/runtime/NEON/functions/NENonLinearFilter.cpp5
-rw-r--r--src/runtime/NEON/functions/NENonMaximaSuppression3x3.cpp5
-rw-r--r--src/runtime/NEON/functions/NENormalizationLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEOpticalFlow.cpp3
-rw-r--r--src/runtime/NEON/functions/NEPReluLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NEPadLayer.cpp5
-rw-r--r--src/runtime/NEON/functions/NEPermute.cpp3
-rw-r--r--src/runtime/NEON/functions/NEPhase.cpp5
-rw-r--r--src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp13
-rw-r--r--src/runtime/NEON/functions/NEPoolingLayer.cpp5
-rw-r--r--src/runtime/NEON/functions/NEPriorBoxLayer.cpp4
-rw-r--r--src/runtime/NEON/functions/NEQLSTMLayer.cpp21
-rw-r--r--src/runtime/NEON/functions/NEQuantizationLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NERNNLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEROIAlignLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEROIPoolingLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NERange.cpp3
-rw-r--r--src/runtime/NEON/functions/NEReductionOperation.cpp3
-rw-r--r--src/runtime/NEON/functions/NERemap.cpp5
-rw-r--r--src/runtime/NEON/functions/NEReorgLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEReshapeLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NEReverse.cpp3
-rw-r--r--src/runtime/NEON/functions/NEScale.cpp3
-rw-r--r--src/runtime/NEON/functions/NEScharr3x3.cpp5
-rw-r--r--src/runtime/NEON/functions/NESelect.cpp3
-rw-r--r--src/runtime/NEON/functions/NESlice.cpp8
-rw-r--r--src/runtime/NEON/functions/NESobel3x3.cpp5
-rw-r--r--src/runtime/NEON/functions/NESobel5x5.cpp7
-rw-r--r--src/runtime/NEON/functions/NESobel7x7.cpp7
-rw-r--r--src/runtime/NEON/functions/NESoftmaxLayer.cpp7
-rw-r--r--src/runtime/NEON/functions/NESpaceToBatchLayer.cpp9
-rw-r--r--src/runtime/NEON/functions/NESpaceToDepthLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEStackLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEStridedSlice.cpp7
-rw-r--r--src/runtime/NEON/functions/NETableLookup.cpp3
-rw-r--r--src/runtime/NEON/functions/NEThreshold.cpp3
-rw-r--r--src/runtime/NEON/functions/NETile.cpp3
-rw-r--r--src/runtime/NEON/functions/NETranspose.cpp3
-rw-r--r--src/runtime/NEON/functions/NEUpsampleLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/NEWarpAffine.cpp7
-rw-r--r--src/runtime/NEON/functions/NEWarpPerspective.cpp7
-rw-r--r--src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp61
-rw-r--r--src/runtime/NEON/functions/NEYOLOLayer.cpp3
-rw-r--r--src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp36
-rw-r--r--src/runtime/OffsetLifetimeManager.cpp3
-rw-r--r--src/runtime/OffsetMemoryPool.cpp3
-rw-r--r--src/runtime/PoolManager.cpp5
-rw-r--r--src/runtime/Scheduler.cpp7
-rw-r--r--src/runtime/SchedulerFactory.cpp8
-rw-r--r--src/runtime/TensorAllocator.cpp5
-rw-r--r--support/MemorySupport.h114
-rw-r--r--tests/CL/Helper.h10
-rw-r--r--tests/NEON/Helper.h12
-rw-r--r--tests/RawTensor.cpp8
-rw-r--r--tests/SimpleTensor.h7
-rw-r--r--tests/framework/Framework.cpp4
-rw-r--r--tests/framework/Framework.h2
-rw-r--r--tests/framework/TestCaseFactory.h5
-rw-r--r--tests/framework/command_line/CommonOptions.cpp12
-rw-r--r--tests/framework/instruments/Instrument.h4
-rw-r--r--tests/framework/instruments/SchedulerTimer.cpp2
-rw-r--r--tests/main.cpp13
-rw-r--r--tests/validate_examples/RunExample.cpp6
-rw-r--r--tests/validate_examples/graph_validate_utils.h10
-rw-r--r--tests/validation/CL/UNIT/TensorAllocator.cpp6
-rw-r--r--tests/validation/NEON/UNIT/TensorAllocator.cpp10
-rw-r--r--tests/validation/fixtures/UNIT/DynamicTensorFixture.h6
-rw-r--r--utils/GraphUtils.h60
-rw-r--r--utils/ImageLoader.h10
-rw-r--r--utils/Utils.h4
-rw-r--r--utils/command_line/CommandLineParser.h6
357 files changed, 982 insertions, 1409 deletions
diff --git a/3rdparty b/3rdparty
-Subproject ba65985c4a47effae4620b95b158ecae8764d2e
+Subproject 679eadd4df491b26b8b824690348e334ad588c7
diff --git a/SConstruct b/SConstruct
index 4be00f3070..395fb5e59d 100644
--- a/SConstruct
+++ b/SConstruct
@@ -146,7 +146,7 @@ if not env['exceptions']:
env.Append(CXXFLAGS = ['-Wall','-DARCH_ARM',
'-Wextra','-pedantic','-Wdisabled-optimization','-Wformat=2',
'-Winit-self','-Wstrict-overflow=2','-Wswitch-default',
- '-std=gnu++11','-Woverloaded-virtual', '-Wformat-security',
+ '-std=c++14','-Woverloaded-virtual', '-Wformat-security',
'-Wctor-dtor-privacy','-Wsign-promo','-Weffc++','-Wno-overlength-strings'])
env.Append(CPPDEFINES = ['_GLIBCXX_USE_NANOSLEEP'])
diff --git a/arm_compute/core/utils/logging/Helpers.h b/arm_compute/core/utils/logging/Helpers.h
index 08b8eb354a..5f8b948592 100644
--- a/arm_compute/core/utils/logging/Helpers.h
+++ b/arm_compute/core/utils/logging/Helpers.h
@@ -25,7 +25,6 @@
#define ARM_COMPUTE_LOGGING_HELPERS_H
#include "arm_compute/core/utils/logging/Types.h"
-#include "support/MemorySupport.h"
#include "support/ToolchainSupport.h"
#include <cstddef>
@@ -49,7 +48,7 @@ template <typename... Ts>
inline std::string string_with_format(const std::string &fmt, Ts &&... args)
{
size_t size = support::cpp11::snprintf(nullptr, 0, fmt.c_str(), args...) + 1;
- auto char_str = support::cpp14::make_unique<char[]>(size);
+ auto char_str = std::make_unique<char[]>(size);
support::cpp11::snprintf(char_str.get(), size, fmt.c_str(), args...);
return std::string(char_str.get(), char_str.get() + size - 1);
}
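A hedged usage sketch of the helper changed above; the enclosing namespace and the format arguments are assumptions for illustration, not taken from this patch:

    // Formats a printf-style message into a std::string; internally the
    // helper now sizes its temporary char buffer with std::make_unique<char[]>.
    #include "arm_compute/core/utils/logging/Helpers.h"

    const std::string msg =
        arm_compute::logging::string_with_format("Allocated %zu bytes", static_cast<std::size_t>(1024));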
diff --git a/arm_compute/core/utils/logging/Macros.h b/arm_compute/core/utils/logging/Macros.h
index 6a1b7611ec..21ed721eb1 100644
--- a/arm_compute/core/utils/logging/Macros.h
+++ b/arm_compute/core/utils/logging/Macros.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,7 @@
if(__logger != nullptr) \
{ \
size_t size = ::snprintf(nullptr, 0, fmt, __VA_ARGS__) + 1; \
- auto char_str = support::cpp14::make_unique<char[]>(size); \
+ auto char_str = std::make_unique<char[]>(size); \
::snprintf(char_str.get(), size, #fmt, __VA_ARGS__); \
__logger->log(log_level, std::string(char_str.get(), char_str.get() + size - 1)); \
} \
diff --git a/arm_compute/graph/Graph.h b/arm_compute/graph/Graph.h
index 0cdd8f8faa..d8d3feb1f7 100644
--- a/arm_compute/graph/Graph.h
+++ b/arm_compute/graph/Graph.h
@@ -238,7 +238,7 @@ inline NodeID Graph::add_node(Ts &&... args)
// Create node
NodeID nid = _nodes.size();
- auto node = support::cpp14::make_unique<NT>(std::forward<Ts>(args)...);
+ auto node = std::make_unique<NT>(std::forward<Ts>(args)...);
node->set_graph(this);
node->set_id(nid);
diff --git a/arm_compute/graph/TensorDescriptor.h b/arm_compute/graph/TensorDescriptor.h
index de67289bc8..5fa155efc8 100644
--- a/arm_compute/graph/TensorDescriptor.h
+++ b/arm_compute/graph/TensorDescriptor.h
@@ -27,7 +27,6 @@
#include "arm_compute/graph/Types.h"
#include "support/ICloneable.h"
-#include "support/MemorySupport.h"
#include <memory>
@@ -104,7 +103,7 @@ struct TensorDescriptor final : public misc::ICloneable<TensorDescriptor>
// Inherited methods overridden:
std::unique_ptr<TensorDescriptor> clone() const override
{
- return support::cpp14::make_unique<TensorDescriptor>(*this);
+ return std::make_unique<TensorDescriptor>(*this);
}
TensorShape shape{}; /**< Tensor shape */
diff --git a/arm_compute/graph/backends/BackendRegistry.h b/arm_compute/graph/backends/BackendRegistry.h
index c4414a23f6..7c11d35faf 100644
--- a/arm_compute/graph/backends/BackendRegistry.h
+++ b/arm_compute/graph/backends/BackendRegistry.h
@@ -26,7 +26,6 @@
#include "arm_compute/graph/IDeviceBackend.h"
#include "arm_compute/graph/Types.h"
-#include "support/MemorySupport.h"
#include <map>
#include <memory>
@@ -93,7 +92,7 @@ private:
template <typename T>
inline void BackendRegistry::add_backend(Target target)
{
- _registered_backends[target] = support::cpp14::make_unique<T>();
+ _registered_backends[target] = std::make_unique<T>();
}
} // namespace backends
} // namespace graph
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index e2904af0b5..05bd483cfd 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -113,7 +113,7 @@ std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
const ActivationLayerInfo act_info = node.activation_info();
// Create function
- auto func = support::cpp14::make_unique<ActivationLayerFunction>();
+ auto func = std::make_unique<ActivationLayerFunction>();
func->configure(input, output, act_info);
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
@@ -152,7 +152,7 @@ std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
unsigned int axis = node.axis();
// Create function
- auto func = support::cpp14::make_unique<ArgMinMaxLayerFunction>();
+ auto func = std::make_unique<ArgMinMaxLayerFunction>();
func->configure(input, axis, output, op);
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
@@ -194,7 +194,7 @@ std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLa
const ActivationLayerInfo fused_act = node.fused_activation();
// Create and configure function
- auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
+ auto func = std::make_unique<BatchNormalizationLayerFunction>();
func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
// Log info
@@ -346,7 +346,7 @@ std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransf
const BoundingBoxTransformInfo bbox_info = node.info();
// Create and configure function
- auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
+ auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
func->configure(input, output, deltas, bbox_info);
// Log info
@@ -383,7 +383,7 @@ std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode
const unsigned int num_groups = node.num_groups();
// Create function
- auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
+ auto func = std::make_unique<ChannelShuffleLayerFunction>();
func->configure(input, output, num_groups);
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
@@ -430,7 +430,7 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
// Create and configure function
- auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
+ auto func = std::make_unique<ConcatenateLayerFunction>();
func->configure(inputs, output, concat_axis);
// Log info
@@ -673,7 +673,7 @@ std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &no
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<DepthToSpaceLayerFunction>();
+ auto func = std::make_unique<DepthToSpaceLayerFunction>();
func->configure(input, output, node.block_shape());
// Log info
@@ -712,7 +712,7 @@ std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
+ auto func = std::make_unique<DequantizationLayerFunction>();
func->configure(input, output);
// Log info
@@ -755,7 +755,7 @@ std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNod
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
+ auto func = std::make_unique<DetectionOutputLayerFunction>();
func->configure(input0, input1, input2, output, detect_info);
// Log info
@@ -807,7 +807,7 @@ std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProc
ARM_COMPUTE_ERROR_ON(output3 == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
+ auto func = std::make_unique<DetectionPostProcessLayerFunction>();
func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
// Log info
@@ -968,7 +968,7 @@ std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<FlattenLayerFunction>();
+ auto func = std::make_unique<FlattenLayerFunction>();
func->configure(input, output);
// Log info
@@ -1013,7 +1013,7 @@ std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode
// Create and configure function
auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
- auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
+ auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
func->configure(input, weights, biases, output, fc_info);
const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
@@ -1071,7 +1071,7 @@ std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLaye
ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
+ auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
// Log info
@@ -1115,7 +1115,7 @@ std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node,
// Create and configure function
auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
- auto func = support::cpp14::make_unique<L2NormalizeLayerFunction>(mm);
+ auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
func->configure(input, output, axis, epsilon);
// Log info
@@ -1158,7 +1158,7 @@ std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &no
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
+ auto func = std::make_unique<NormalizationLayerFunction>();
func->configure(input, output, norm_info);
// Log info
@@ -1200,7 +1200,7 @@ std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVL
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
+ auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
func->configure(input, output, mean, std);
// Log info
@@ -1238,7 +1238,7 @@ std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<PadLayerFunction>();
+ auto func = std::make_unique<PadLayerFunction>();
func->configure(input, output, padding, pad_value);
// Log info
@@ -1276,7 +1276,7 @@ std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<PermuteLayerFunction>();
+ auto func = std::make_unique<PermuteLayerFunction>();
func->configure(input, output, perm);
// Log info
@@ -1315,7 +1315,7 @@ std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<PoolingLayerFunction>();
+ auto func = std::make_unique<PoolingLayerFunction>();
func->configure(input, output, pool_info);
// Log info
@@ -1354,7 +1354,7 @@ std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<PReluFunction>();
+ auto func = std::make_unique<PReluFunction>();
func->configure(input, alpha, output);
// Log info
@@ -1423,7 +1423,7 @@ std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
+ auto func = std::make_unique<PriorBoxLayerFunction>();
func->configure(input0, input1, output, prior_info);
// Log info
@@ -1462,7 +1462,7 @@ std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
+ auto func = std::make_unique<QuantizationLayerFunction>();
func->configure(input, output);
// Log info
@@ -1503,7 +1503,7 @@ std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
+ auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
func->configure(input, output, axis, op, keep_dims);
// Log info
@@ -1543,7 +1543,7 @@ std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<ReorgLayerFunction>();
+ auto func = std::make_unique<ReorgLayerFunction>();
func->configure(input, output, node.stride());
// Log info
@@ -1580,7 +1580,7 @@ std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
+ auto func = std::make_unique<ReshapeLayerFunction>();
func->configure(input, output);
// Log info
@@ -1618,7 +1618,7 @@ std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
const InterpolationPolicy policy = node.policy();
// Create and configure function
- auto func = support::cpp14::make_unique<ResizeLayerFunction>();
+ auto func = std::make_unique<ResizeLayerFunction>();
func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });
// Log info
@@ -1660,7 +1660,7 @@ std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
const ROIPoolingLayerInfo pool_info = node.pooling_info();
// Create and configure function
- auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
+ auto func = std::make_unique<ROIAlignLayerFunction>();
func->configure(input, rois, output, pool_info);
@@ -1701,7 +1701,7 @@ std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<SliceLayerFunction>();
+ auto func = std::make_unique<SliceLayerFunction>();
func->configure(input, output, node.starts(), node.ends());
// Log info
@@ -1740,7 +1740,7 @@ std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphCon
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
+ auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
func->configure(input, output, beta);
// Log info
@@ -1781,7 +1781,7 @@ std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
const int axis = node.axis();
// Create and configure function
- auto func = support::cpp14::make_unique<StackLayerFunction>();
+ auto func = std::make_unique<StackLayerFunction>();
func->configure(inputs, axis, output);
// Log info
@@ -1825,7 +1825,7 @@ std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &nod
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<StridedSliceLayerFunction>();
+ auto func = std::make_unique<StridedSliceLayerFunction>();
func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
// Log info
@@ -1868,7 +1868,7 @@ std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphC
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
+ auto func = std::make_unique<UpsampleLayerFunction>();
func->configure(input, output, info, upsampling_policy);
// Log info
@@ -1911,7 +1911,7 @@ std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<YOLOlayerFunction>();
+ auto func = std::make_unique<YOLOlayerFunction>();
func->configure(input, output, act_info, num_classes);
// Log info
diff --git a/arm_compute/graph/backends/Utils.h b/arm_compute/graph/backends/Utils.h
index 7d67f3b9e3..774ce515b5 100644
--- a/arm_compute/graph/backends/Utils.h
+++ b/arm_compute/graph/backends/Utils.h
@@ -44,7 +44,7 @@ namespace backends
template <typename FunctionType, typename FunctionNameType, typename... ParameterType>
std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_named_function(FunctionNameType name, ParameterType... args)
{
- auto f = arm_compute::support::cpp14::make_unique<FunctionType>();
+ auto f = std::make_unique<FunctionType>();
f->configure(std::forward<ParameterType>(args)...);
return std::make_pair(std::move(f), name);
}
@@ -62,7 +62,7 @@ std::tuple<std::unique_ptr<arm_compute::IFunction>, FunctionNameType> create_nam
MemoryManagerType mm,
ParameterType... args)
{
- auto f = arm_compute::support::cpp14::make_unique<FunctionType>(mm);
+ auto f = std::make_unique<FunctionType>(mm);
f->configure(std::forward<ParameterType>(args)...);
return std::make_pair(std::move(f), name);
}
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 74c40126c8..23f503342b 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -300,12 +300,12 @@ public:
ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
: _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
utility::for_each([&](SubStream && sub_stream)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
},
std::move(rest_sub_streams)...);
}
@@ -320,12 +320,12 @@ public:
ConcatLayer(descriptors::ConcatLayerDescriptor concat_descriptor, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
: _sub_streams(), _concat_descriptor(concat_descriptor)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
utility::for_each([&](SubStream && sub_stream)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
},
std::move(rest_sub_streams)...);
}
@@ -337,7 +337,7 @@ public:
ConcatLayer(SubStream &&sub_stream)
: _sub_streams(), _concat_descriptor(DataLayoutDimension::CHANNEL)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
}
NodeID create_layer(IStream &s) override
{
@@ -754,8 +754,8 @@ public:
: _num_outputs(num_outputs),
_weights(nullptr),
_bias(nullptr),
- _weights_ss(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream_weights))),
- _bias_ss(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream_bias))),
+ _weights_ss(std::make_unique<SubStream>(std::move(sub_stream_weights))),
+ _bias_ss(std::make_unique<SubStream>(std::move(sub_stream_bias))),
_fc_info(fc_info),
_weights_quant_info(std::move(weights_quant_info)),
_out_quant_info(std::move(out_quant_info))
@@ -1357,12 +1357,12 @@ public:
StackLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
: _sub_streams(), _axis(0)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
utility::for_each([&](SubStream && sub_stream)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
},
std::move(rest_sub_streams)...);
}
@@ -1377,12 +1377,12 @@ public:
StackLayer(int axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
: _sub_streams(), _axis(axis)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream1)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream2)));
utility::for_each([&](SubStream && sub_stream)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
},
std::move(rest_sub_streams)...);
}
@@ -1394,7 +1394,7 @@ public:
StackLayer(SubStream &&sub_stream)
: _sub_streams(), _axis(0)
{
- _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ _sub_streams.push_back(std::make_unique<SubStream>(std::move(sub_stream)));
}
NodeID create_layer(IStream &s) override
{
diff --git a/arm_compute/runtime/Array.h b/arm_compute/runtime/Array.h
index 817d97a64d..5b98b6c2bc 100644
--- a/arm_compute/runtime/Array.h
+++ b/arm_compute/runtime/Array.h
@@ -26,7 +26,6 @@
#include "arm_compute/core/IArray.h"
#include "arm_compute/core/Types.h"
-#include "support/MemorySupport.h"
#include <memory>
@@ -47,7 +46,7 @@ public:
* @param[in] max_num_values Maximum number of values the array will be able to stored
*/
Array(size_t max_num_values)
- : IArray<T>(max_num_values), _values(arm_compute::support::cpp14::make_unique<T[]>(max_num_values))
+ : IArray<T>(max_num_values), _values(std::make_unique<T[]>(max_num_values))
{
}
diff --git a/arm_compute/runtime/CL/tuners/CLLWSList.h b/arm_compute/runtime/CL/tuners/CLLWSList.h
index 48f3f3f7c9..fe63754dd0 100644
--- a/arm_compute/runtime/CL/tuners/CLLWSList.h
+++ b/arm_compute/runtime/CL/tuners/CLLWSList.h
@@ -30,7 +30,7 @@
#include "arm_compute/runtime/CL/CLTunerTypes.h"
#include "support/ToolchainSupport.h"
-#include "support/MemorySupport.h"
+#include <memory>
namespace arm_compute
{
@@ -199,11 +199,11 @@ public:
switch(mode)
{
case CLTunerMode::EXHAUSTIVE:
- return arm_compute::support::cpp14::make_unique<CLLWSListExhaustive>(gws);
+ return std::make_unique<CLLWSListExhaustive>(gws);
case CLTunerMode::NORMAL:
- return arm_compute::support::cpp14::make_unique<CLLWSListNormal>(gws);
+ return std::make_unique<CLLWSListNormal>(gws);
case CLTunerMode::RAPID:
- return arm_compute::support::cpp14::make_unique<CLLWSListRapid>(gws);
+ return std::make_unique<CLLWSListRapid>(gws);
default:
return nullptr;
}
diff --git a/arm_compute/runtime/CL/tuners/Tuners.h b/arm_compute/runtime/CL/tuners/Tuners.h
index dd1c62a252..3ba9e0071d 100644
--- a/arm_compute/runtime/CL/tuners/Tuners.h
+++ b/arm_compute/runtime/CL/tuners/Tuners.h
@@ -27,8 +27,6 @@
#include "arm_compute/runtime/CL/tuners/BifrostTuner.h"
#include "arm_compute/runtime/CL/tuners/MidgardTuner.h"
-#include "support/MemorySupport.h"
-
#include <memory>
namespace arm_compute
@@ -45,9 +43,9 @@ public:
switch(arch)
{
case GPUTarget::BIFROST:
- return support::cpp14::make_unique<BifrostTuner>();
+ return std::make_unique<BifrostTuner>();
case GPUTarget::MIDGARD:
- return support::cpp14::make_unique<MidgardTuner>();
+ return std::make_unique<MidgardTuner>();
default:
return nullptr;
}
diff --git a/arm_compute/runtime/MemoryRegion.h b/arm_compute/runtime/MemoryRegion.h
index 63feabd281..6408deceaa 100644
--- a/arm_compute/runtime/MemoryRegion.h
+++ b/arm_compute/runtime/MemoryRegion.h
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/IMemoryRegion.h"
#include "arm_compute/core/Error.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -59,7 +58,7 @@ public:
if(alignment != 0)
{
void *aligned_ptr = _mem.get();
- support::cpp11::align(alignment, size, aligned_ptr, space);
+ std::align(alignment, size, aligned_ptr, space);
_ptr = aligned_ptr;
}
}
@@ -94,7 +93,7 @@ public:
{
if(_ptr != nullptr && (offset < _size) && (_size - offset >= size))
{
- return support::cpp14::make_unique<MemoryRegion>(static_cast<uint8_t *>(_ptr) + offset, size);
+ return std::make_unique<MemoryRegion>(static_cast<uint8_t *>(_ptr) + offset, size);
}
else
{
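The MemoryRegion change above also swaps the support::cpp11::align shim for std::align from <memory>. A minimal sketch of the standard call, assuming an illustrative 64-byte alignment and buffer size:

    #include <cstddef>
    #include <cstdint>
    #include <memory>

    int main()
    {
        alignas(64) std::uint8_t buffer[256];
        void       *ptr   = buffer + 3;        // deliberately misaligned
        std::size_t space = sizeof(buffer) - 3;

        // Advances ptr to the next 64-byte boundary if 128 bytes still fit
        // within 'space' (which std::align reduces accordingly); returns
        // nullptr when the request cannot be satisfied.
        void *aligned = std::align(64, 128, ptr, space);
        return aligned != nullptr ? 0 : 1;
    }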
diff --git a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
index fcabc1d0c4..e0054bceff 100644
--- a/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQLSTMLayer.h
@@ -33,9 +33,8 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h"
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
-#include "support/MemorySupport.h"
-
#include "arm_compute/runtime/common/LSTMParams.h"
+
#include <memory>
namespace arm_compute
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index 8eb0762f9f..7fe73c42f0 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -86,6 +86,9 @@ If there is more than one release in a month then an extra sequential number is
@subsection S2_2_changelog Changelog
+v21.02 Public major release
+ - Upgraded C++ standard to C++14
+
v20.11 Public major release
- Various bug fixes.
- Various optimisations.
@@ -1483,29 +1486,29 @@ The examples get automatically built by scons as part of the build process of th
To cross compile a NEON example for Linux 32bit:
- arm-linux-gnueabihf-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute -larm_compute_core -o neon_convolution
+ arm-linux-gnueabihf-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o neon_convolution
To cross compile a NEON example for Linux 64bit:
- aarch64-linux-gnu-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -L. -larm_compute -larm_compute_core -o neon_convolution
+ aarch64-linux-gnu-g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o neon_convolution
(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
To cross compile an OpenCL example for Linux 32bit:
- arm-linux-gnueabihf-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+ arm-linux-gnueabihf-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
To cross compile an OpenCL example for Linux 64bit:
- aarch64-linux-gnu-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+ aarch64-linux-gnu-g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -L. -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
To cross compile a GLES example for Linux 32bit:
- arm-linux-gnueabihf-g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++11 -mfpu=neon -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
+ arm-linux-gnueabihf-g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++14 -mfpu=neon -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
To cross compile a GLES example for Linux 64bit:
- aarch64-linux-gnu-g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++11 -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
+ aarch64-linux-gnu-g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++14 -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
@@ -1513,11 +1516,11 @@ To cross compile the examples with the Graph API, such as graph_lenet.cpp, you n
i.e. to cross compile the "graph_lenet" example for Linux 32bit:
- arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ arm-linux-gnueabihf-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
i.e. to cross compile the "graph_lenet" example for Linux 64bit:
- aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ aarch64-linux-gnu-g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
(notice the only difference with the 32 bit command is that we don't need the -mfpu option and the compiler's name is different)
@@ -1525,31 +1528,31 @@ i.e. to cross compile the "graph_lenet" example for Linux 64bit:
To compile natively (i.e directly on an ARM device) for NEON for Linux 32bit:
- g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -mfpu=neon -larm_compute -larm_compute_core -o neon_convolution
+ g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -mfpu=neon -larm_compute -larm_compute_core -o neon_convolution
To compile natively (i.e directly on an ARM device) for NEON for Linux 64bit:
- g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute -larm_compute_core -o neon_convolution
+ g++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o neon_convolution
(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
To compile natively (i.e directly on an ARM device) for OpenCL for Linux 32bit or Linux 64bit:
- g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
+ g++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute -larm_compute_core -o cl_convolution -DARM_COMPUTE_CL
To compile natively (i.e directly on an ARM device) for GLES for Linux 32bit or Linux 64bit:
- g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++11 -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
+ g++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude/ -L. -larm_compute -larm_compute_core -std=c++14 -DARM_COMPUTE_GC -Iinclude/linux/ -o gc_absdiff
To compile natively the examples with the Graph API, such as graph_lenet.cpp, you need to link the examples against arm_compute_graph.so too.
i.e. to natively compile the "graph_lenet" example for Linux 32bit:
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -mfpu=neon -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
i.e. to natively compile the "graph_lenet" example for Linux 64bit:
- g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
+ g++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -L. -larm_compute_graph -larm_compute -larm_compute_core -Wl,--allow-shlib-undefined -o graph_lenet
(notice the only difference with the 32 bit command is that we don't need the -mfpu option)
@@ -1623,30 +1626,30 @@ Once you've got your Android standalone toolchain built and added to your path y
To cross compile a NEON example:
#32 bit:
- arm-linux-androideabi-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_arm -static-libstdc++ -pie
+ arm-linux-androideabi-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_arm -static-libstdc++ -pie
#64 bit:
- aarch64-linux-android-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_aarch64 -static-libstdc++ -pie
+ aarch64-linux-android-clang++ examples/neon_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o neon_convolution_aarch64 -static-libstdc++ -pie
To cross compile an OpenCL example:
#32 bit:
- arm-linux-androideabi-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
+ arm-linux-androideabi-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
#64 bit:
- aarch64-linux-android-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
+ aarch64-linux-android-clang++ examples/cl_convolution.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o cl_convolution_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
To cross compile a GLES example:
#32 bit:
- arm-linux-androideabi-clang++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o gc_absdiff_arm -static-libstdc++ -pie -DARM_COMPUTE_GC
+ arm-linux-androideabi-clang++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o gc_absdiff_arm -static-libstdc++ -pie -DARM_COMPUTE_GC
#64 bit:
- aarch64-linux-android-clang++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude -std=c++11 -larm_compute-static -larm_compute_core-static -L. -o gc_absdiff_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_GC
+ aarch64-linux-android-clang++ examples/gc_absdiff.cpp utils/Utils.cpp -I. -Iinclude -std=c++14 -larm_compute-static -larm_compute_core-static -L. -o gc_absdiff_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_GC
To cross compile the examples that use the Graph API, such as graph_lenet.cpp, you also need to link against the arm_compute_graph library.
#32 bit:
- arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
+ arm-linux-androideabi-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_arm -static-libstdc++ -pie -DARM_COMPUTE_CL
#64 bit:
- aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++11 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
+ aarch64-linux-android-clang++ examples/graph_lenet.cpp utils/Utils.cpp utils/GraphUtils.cpp utils/CommonGraphOptions.cpp -I. -Iinclude -std=c++14 -Wl,--whole-archive -larm_compute_graph-static -Wl,--no-whole-archive -larm_compute-static -larm_compute_core-static -L. -o graph_lenet_aarch64 -static-libstdc++ -pie -DARM_COMPUTE_CL
@note Due to some issues in older versions of the Mali OpenCL DDK (<= r13p0), we recommend linking arm_compute statically on Android.
@note When linked statically, the arm_compute_graph library currently needs the --whole-archive linker flag in order to work properly.
diff --git a/docs/05_contribution_guidelines.dox b/docs/05_contribution_guidelines.dox
index 1cdd129733..35b9f49dbc 100644
--- a/docs/05_contribution_guidelines.dox
+++ b/docs/05_contribution_guidelines.dox
@@ -391,7 +391,7 @@ In order to deprecate an existing API, these rules should be followed.
- Deprecation of runtime APIs should strictly follow the aforementioned period, whereas core APIs can have more flexibility as they are mostly used internally rather than user-facing.
- Any API changes (update, addition and deprecation) in all components should be well documented by the contribution itself.
-Also, it is recommended to use the following utility macros which is designed to work with both clang and gcc using C++11 and later.
+Also, it is recommended to use the following utility macros, which are designed to work with both clang and gcc using C++14 and later.
- ARM_COMPUTE_DEPRECATED: Just deprecate the wrapped function
- ARM_COMPUTE_DEPRECATED_REL: Deprecate the wrapped function and also capture the release that was deprecated
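A minimal usage sketch follows (illustration only, not part of this patch): the declarations and the release number are hypothetical, and the macros are assumed to expand to compiler deprecation attributes such as [[deprecated]].

    // Hypothetical header snippet showing how the macros are applied.
    // Deprecate an overload and record the release in which it was deprecated (release number is illustrative).
    ARM_COMPUTE_DEPRECATED_REL(20.11)
    void configure(ICLTensor *input, ICLTensor *output);

    // Deprecate a helper without tying it to a specific release.
    ARM_COMPUTE_DEPRECATED
    void legacy_helper();

    // Preferred, non-deprecated entry point.
    void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output);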
diff --git a/docs/Doxyfile b/docs/Doxyfile
index bdc4b776d3..51548dc2d4 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -1082,7 +1082,7 @@ CLANG_ASSISTED_PARSING = NO
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-CLANG_OPTIONS = -std=c++11
+CLANG_OPTIONS = -std=c++14
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
diff --git a/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
index c6818e48b0..8323bbd971 100644
--- a/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
+++ b/examples/gemm_tuner/cl_gemmlowp_reshaped_rhs_only_fused_output_stage_fixedpoint.cpp
@@ -280,7 +280,7 @@ public:
const TensorInfo info_vector_sum_row(compute_reductionB_shape(*lhs.info()), 1, DataType::S32);
vector_sum_row.allocator()->init(info_vector_sum_row);
- mtx_a_reduction = support::cpp14::make_unique<CLGEMMLowpMatrixAReduction>();
+ mtx_a_reduction = std::make_unique<CLGEMMLowpMatrixAReduction>();
if(!mtx_a_reduction->validate(lhs.info(), vector_sum_row.info(), GEMMLowpReductionKernelInfo{}))
{
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 40bbee1d68..ce398be6cf 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -70,7 +70,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_googlenet.cpp b/examples/graph_googlenet.cpp
index ed5cbd5120..0a53355611 100644
--- a/examples/graph_googlenet.cpp
+++ b/examples/graph_googlenet.cpp
@@ -66,7 +66,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_inception_resnet_v1.cpp b/examples/graph_inception_resnet_v1.cpp
index 7c0bb0ce48..7a55733a20 100644
--- a/examples/graph_inception_resnet_v1.cpp
+++ b/examples/graph_inception_resnet_v1.cpp
@@ -92,7 +92,7 @@ public:
}
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>(0.f, 1.f);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>(0.f, 1.f);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
@@ -207,7 +207,7 @@ public:
get_weights_accessor(data_path, "Logits_Logits_weights.npy", weights_layout),
get_weights_accessor(data_path, "Logits_Logits_biases.npy"))
.set_name("Logits/Logits")
- << OutputLayer(arm_compute::support::cpp14::make_unique<DummyAccessor>(0));
+ << OutputLayer(std::make_unique<DummyAccessor>(0));
// Finalize graph
GraphConfig config;
diff --git a/examples/graph_inception_resnet_v2.cpp b/examples/graph_inception_resnet_v2.cpp
index d14c34eb9d..60236d0780 100644
--- a/examples/graph_inception_resnet_v2.cpp
+++ b/examples/graph_inception_resnet_v2.cpp
@@ -76,7 +76,7 @@ public:
}
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>(0.f, 1.f);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>(0.f, 1.f);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_inception_v3.cpp b/examples/graph_inception_v3.cpp
index 4b6dc8d296..5cacbcb6e1 100644
--- a/examples/graph_inception_v3.cpp
+++ b/examples/graph_inception_v3.cpp
@@ -62,7 +62,7 @@ public:
std::string data_path = common_params.data_path;
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_inception_v4.cpp b/examples/graph_inception_v4.cpp
index 553c96d3e4..db2a31047e 100644
--- a/examples/graph_inception_v4.cpp
+++ b/examples/graph_inception_v4.cpp
@@ -66,7 +66,7 @@ public:
std::string data_path = common_params.data_path;
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_mobilenet.cpp b/examples/graph_mobilenet.cpp
index f74d25189d..b73f7a2abd 100644
--- a/examples/graph_mobilenet.cpp
+++ b/examples/graph_mobilenet.cpp
@@ -124,7 +124,7 @@ private:
std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Get trainable parameters data path
std::string data_path = common_params.data_path;
diff --git a/examples/graph_mobilenet_v2.cpp b/examples/graph_mobilenet_v2.cpp
index 5ee1f7e52a..fa16c94645 100644
--- a/examples/graph_mobilenet_v2.cpp
+++ b/examples/graph_mobilenet_v2.cpp
@@ -129,7 +129,7 @@ private:
const std::string model_path = "/cnn_data/mobilenet_v2_1.0_224_model/";
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Get trainable parameters data path
std::string data_path = common_params.data_path;
diff --git a/examples/graph_resnet12.cpp b/examples/graph_resnet12.cpp
index badcaec107..ebd2e5dd16 100644
--- a/examples/graph_resnet12.cpp
+++ b/examples/graph_resnet12.cpp
@@ -81,7 +81,7 @@ public:
const std::string model_path = "/cnn_data/resnet12_model/";
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const TensorShape tensor_shape = permute_shape(TensorShape(image_width, image_height, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
@@ -128,7 +128,7 @@ public:
.set_name("conv12/convolution")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)).set_name("conv12/Tanh")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 0.58f, 0.5f)).set_name("conv12/Linear")
- << OutputLayer(arm_compute::support::cpp14::make_unique<DummyAccessor>(0));
+ << OutputLayer(std::make_unique<DummyAccessor>(0));
// Finalize graph
GraphConfig config;
diff --git a/examples/graph_resnet50.cpp b/examples/graph_resnet50.cpp
index 2939ee40c4..47d258ede7 100644
--- a/examples/graph_resnet50.cpp
+++ b/examples/graph_resnet50.cpp
@@ -63,8 +63,8 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
- false /* Do not convert to BGR */);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb,
+ false /* Do not convert to BGR */);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_resnet_v2_50.cpp b/examples/graph_resnet_v2_50.cpp
index 32434f55dd..921fb145d6 100644
--- a/examples/graph_resnet_v2_50.cpp
+++ b/examples/graph_resnet_v2_50.cpp
@@ -67,7 +67,7 @@ public:
}
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_shufflenet.cpp b/examples/graph_shufflenet.cpp
index 08f884b75f..300d0f15a1 100644
--- a/examples/graph_shufflenet.cpp
+++ b/examples/graph_shufflenet.cpp
@@ -89,7 +89,7 @@ public:
const DataLayout weights_layout = DataLayout::NCHW;
// Create preprocessor
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>(0);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>(0);
graph << common_params.target
<< common_params.fast_math_hint
diff --git a/examples/graph_squeezenet.cpp b/examples/graph_squeezenet.cpp
index f0d620c67d..2e72c14763 100644
--- a/examples/graph_squeezenet.cpp
+++ b/examples/graph_squeezenet.cpp
@@ -63,7 +63,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_squeezenet_v1_1.cpp b/examples/graph_squeezenet_v1_1.cpp
index c60448639d..1708ac2f5a 100644
--- a/examples/graph_squeezenet_v1_1.cpp
+++ b/examples/graph_squeezenet_v1_1.cpp
@@ -63,7 +63,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_srcnn955.cpp b/examples/graph_srcnn955.cpp
index a95f0c1d25..bcc3824c60 100644
--- a/examples/graph_srcnn955.cpp
+++ b/examples/graph_srcnn955.cpp
@@ -78,7 +78,7 @@ public:
const std::string model_path = "/cnn_data/srcnn955_model/";
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const TensorShape tensor_shape = permute_shape(TensorShape(image_width, image_height, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
@@ -111,7 +111,7 @@ public:
PadStrideInfo(1, 1, 2, 2))
.set_name("conv3/convolution")
<< ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv3/Relu")
- << OutputLayer(arm_compute::support::cpp14::make_unique<DummyAccessor>(0));
+ << OutputLayer(std::make_unique<DummyAccessor>(0));
// Finalize graph
GraphConfig config;
diff --git a/examples/graph_ssd_mobilenet.cpp b/examples/graph_ssd_mobilenet.cpp
index edd4c94d02..f5af84f4d4 100644
--- a/examples/graph_ssd_mobilenet.cpp
+++ b/examples/graph_ssd_mobilenet.cpp
@@ -216,7 +216,7 @@ private:
{
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 127.5f, 127.5f, 127.5f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb, true, 0.007843f);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb, true, 0.007843f);
// Get trainable parameters data path
std::string data_path = common_params.data_path;
diff --git a/examples/graph_vgg16.cpp b/examples/graph_vgg16.cpp
index 990040b5ef..a4c5e6bbd2 100644
--- a/examples/graph_vgg16.cpp
+++ b/examples/graph_vgg16.cpp
@@ -63,7 +63,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_vgg19.cpp b/examples/graph_vgg19.cpp
index 9215ba7b61..c95fb03368 100644
--- a/examples/graph_vgg19.cpp
+++ b/examples/graph_vgg19.cpp
@@ -62,7 +62,7 @@ public:
// Create a preprocessor object
const std::array<float, 3> mean_rgb{ { 123.68f, 116.779f, 103.939f } };
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<CaffePreproccessor>(mean_rgb);
// Create input descriptor
const auto operation_layout = common_params.data_layout;
diff --git a/examples/graph_vgg_vdsr.cpp b/examples/graph_vgg_vdsr.cpp
index 65c0642485..3fa7dd1330 100644
--- a/examples/graph_vgg_vdsr.cpp
+++ b/examples/graph_vgg_vdsr.cpp
@@ -79,7 +79,7 @@ public:
const std::string model_path = "/cnn_data/vdsr_model/";
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();
// Create input descriptor
const TensorShape tensor_shape = permute_shape(TensorShape(image_width, image_height, 1U, 1U), DataLayout::NCHW, common_params.data_layout);
@@ -132,7 +132,7 @@ public:
// Add residual to input
graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name("add")
- << OutputLayer(arm_compute::support::cpp14::make_unique<DummyAccessor>(0));
+ << OutputLayer(std::make_unique<DummyAccessor>(0));
// Finalize graph
GraphConfig config;
diff --git a/examples/graph_yolov3.cpp b/examples/graph_yolov3.cpp
index c7f917ba6e..79d891a308 100644
--- a/examples/graph_yolov3.cpp
+++ b/examples/graph_yolov3.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,7 +66,7 @@ public:
std::string data_path = common_params.data_path;
// Create a preprocessor object
- std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>(0.f);
+ std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>(0.f);
// Create input descriptor
const TensorShape tensor_shape = permute_shape(TensorShape(608U, 608U, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
diff --git a/examples/neon_cnn.cpp b/examples/neon_cnn.cpp
index 85f8792b9c..339c8c1a81 100644
--- a/examples/neon_cnn.cpp
+++ b/examples/neon_cnn.cpp
@@ -53,10 +53,10 @@ public:
// The weights and biases tensors should be initialized with the values inferred with the training
// Set memory manager where allowed to manage internal memory requirements
- conv0 = arm_compute::support::cpp14::make_unique<NEConvolutionLayer>(mm_layers);
- conv1 = arm_compute::support::cpp14::make_unique<NEConvolutionLayer>(mm_layers);
- fc0 = arm_compute::support::cpp14::make_unique<NEFullyConnectedLayer>(mm_layers);
- softmax = arm_compute::support::cpp14::make_unique<NESoftmaxLayer>(mm_layers);
+ conv0 = std::make_unique<NEConvolutionLayer>(mm_layers);
+ conv1 = std::make_unique<NEConvolutionLayer>(mm_layers);
+ fc0 = std::make_unique<NEFullyConnectedLayer>(mm_layers);
+ softmax = std::make_unique<NESoftmaxLayer>(mm_layers);
/* [Initialize tensors] */
@@ -170,8 +170,8 @@ public:
// We need 2 memory groups for handling the input and output
// We call explicitly allocate after manage() in order to avoid overlapping lifetimes
- memory_group0 = arm_compute::support::cpp14::make_unique<MemoryGroup>(mm_transitions);
- memory_group1 = arm_compute::support::cpp14::make_unique<MemoryGroup>(mm_transitions);
+ memory_group0 = std::make_unique<MemoryGroup>(mm_transitions);
+ memory_group1 = std::make_unique<MemoryGroup>(mm_transitions);
memory_group0->manage(&out_conv0);
out_conv0.allocator()->allocate();
diff --git a/scripts/clang_tidy_rules.py b/scripts/clang_tidy_rules.py
index ce467f8f55..1e24b042de 100755
--- a/scripts/clang_tidy_rules.py
+++ b/scripts/clang_tidy_rules.py
@@ -16,7 +16,7 @@ def get_list_includes():
def get_list_flags( filename, arch):
assert arch in ["armv7", "aarch64"]
- flags = ["-std=c++11"]
+ flags = ["-std=c++14"]
flags.append("-DARM_COMPUTE_CPP_SCHEDULER=1")
flags.append("-DARM_COMPUTE_CL")
flags.append("-DARM_COMPUTE_GC")
diff --git a/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h b/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h
index aecf5a8aa8..65396b1d98 100644
--- a/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h
+++ b/src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h
@@ -29,7 +29,7 @@
#include "src/core/CL/gemm/native/CLGEMMNativeKernelConfigurationMidgard.h"
#include "src/core/CL/gemm/native/CLGEMMNativeKernelConfigurationValhall.h"
-#include "support/MemorySupport.h"
+#include <memory>
namespace arm_compute
{
@@ -50,11 +50,11 @@ public:
switch(get_arch_from_target(gpu))
{
case GPUTarget::MIDGARD:
- return support::cpp14::make_unique<CLGEMMNativeKernelConfigurationMidgard>(gpu);
+ return std::make_unique<CLGEMMNativeKernelConfigurationMidgard>(gpu);
case GPUTarget::BIFROST:
- return support::cpp14::make_unique<CLGEMMNativeKernelConfigurationBifrost>(gpu);
+ return std::make_unique<CLGEMMNativeKernelConfigurationBifrost>(gpu);
case GPUTarget::VALHALL:
- return support::cpp14::make_unique<CLGEMMNativeKernelConfigurationValhall>(gpu);
+ return std::make_unique<CLGEMMNativeKernelConfigurationValhall>(gpu);
default:
ARM_COMPUTE_ERROR("Not supported GPU target");
}
diff --git a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h
index 21ccf2d647..2a25dc1893 100644
--- a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h
+++ b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h
@@ -28,7 +28,7 @@
#include "src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.h"
#include "src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationValhall.h"
-#include "support/MemorySupport.h"
+#include <memory>
namespace arm_compute
{
@@ -50,9 +50,9 @@ public:
{
case GPUTarget::MIDGARD:
case GPUTarget::BIFROST:
- return support::cpp14::make_unique<CLGEMMReshapedKernelConfigurationBifrost>(gpu);
+ return std::make_unique<CLGEMMReshapedKernelConfigurationBifrost>(gpu);
case GPUTarget::VALHALL:
- return support::cpp14::make_unique<CLGEMMReshapedKernelConfigurationValhall>(gpu);
+ return std::make_unique<CLGEMMReshapedKernelConfigurationValhall>(gpu);
default:
ARM_COMPUTE_ERROR("Not supported GPU target");
}
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h
index 4efe28ce69..96c3045119 100644
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h
+++ b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h
@@ -28,7 +28,7 @@
#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.h"
#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationValhall.h"
-#include "support/MemorySupport.h"
+#include <memory>
namespace arm_compute
{
@@ -50,9 +50,9 @@ public:
{
case GPUTarget::MIDGARD:
case GPUTarget::BIFROST:
- return support::cpp14::make_unique<CLGEMMReshapedOnlyRHSKernelConfigurationBifrost>(gpu);
+ return std::make_unique<CLGEMMReshapedOnlyRHSKernelConfigurationBifrost>(gpu);
case GPUTarget::VALHALL:
- return support::cpp14::make_unique<CLGEMMReshapedOnlyRHSKernelConfigurationValhall>(gpu);
+ return std::make_unique<CLGEMMReshapedOnlyRHSKernelConfigurationValhall>(gpu);
default:
ARM_COMPUTE_ERROR("Not supported GPU target");
}
diff --git a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
index 211ebdec90..f5a0b370ab 100644
--- a/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.cpp
@@ -33,11 +33,11 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/NEON/kernels/convolution/common/utils.hpp"
+#include "src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "support/MemorySupport.h"
-#include "src/core/NEON/kernels/convolution/winograd/winograd_layer.hpp"
+#include <memory>
namespace arm_compute
{
@@ -225,7 +225,7 @@ void NEWinogradLayerTransformWeightsKernel<T, OutputTileRows, OutputTileCols, Ke
_matrix_stride = matrix_stride;
_num_output_channels = num_output_channels;
_num_input_channels = num_input_channels;
- _transform = arm_compute::support::cpp14::make_unique<WeightsTransform>(num_output_channels, num_input_channels);
+ _transform = std::make_unique<WeightsTransform>(num_output_channels, num_input_channels);
Window win;
auto win_last = _transform->get_window();
@@ -348,7 +348,7 @@ void NEWinogradLayerTransformInputKernel<T, OutputTileRows, OutputTileCols, Kern
_padding_bottom = (padding == PADDING_SAME) ? iceildiv(KernelRows - 1, 2) : 0;
_padding_right = (padding == PADDING_SAME) ? iceildiv(KernelCols - 1, 2) : 0;
- _transform = arm_compute::support::cpp14::make_unique<InputTransform>(
+ _transform = std::make_unique<InputTransform>(
KernelRows,
KernelCols,
num_batches,
@@ -492,7 +492,7 @@ void NEWinogradLayerTransformOutputKernel<T, OutputTileRows, OutputTileCols, Ker
_num_cols = num_cols;
_num_channels = num_channels;
// We don't have the biases buffer at this stage as it hasn't been allocated, we pass in nullptr OutputTransform is only used here to compute the window
- _transform = arm_compute::support::cpp14::make_unique<OutputTransform>(num_batches, num_rows, num_cols, num_channels, activation);
+ _transform = std::make_unique<OutputTransform>(num_batches, num_rows, num_cols, num_channels, activation);
Window win;
auto win_last = _transform->get_window();
win.set(Window::DimX, Window::Dimension(0, win_last, 1));
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index 414c128a27..7b1f9c542a 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -29,7 +29,8 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "src/core/helpers/Utils.h"
-#include "support/MemorySupport.h"
+
+#include <memory>
using namespace arm_compute;
@@ -314,7 +315,7 @@ bool TensorInfo::extend_padding(const PaddingSize &padding)
std::unique_ptr<ITensorInfo> TensorInfo::clone() const
{
- return support::cpp14::make_unique<TensorInfo>(*this);
+ return std::make_unique<TensorInfo>(*this);
}
ITensorInfo &TensorInfo::set_data_type(DataType data_type)
diff --git a/src/core/utils/logging/Logger.cpp b/src/core/utils/logging/Logger.cpp
index 05c5fa07d0..70b5868da8 100644
--- a/src/core/utils/logging/Logger.cpp
+++ b/src/core/utils/logging/Logger.cpp
@@ -24,7 +24,8 @@
#include "arm_compute/core/utils/logging/Logger.h"
#include "arm_compute/core/Error.h"
-#include "support/MemorySupport.h"
+
+#include <memory>
using namespace arm_compute::logging;
@@ -116,9 +117,9 @@ void Logger::add_decorator(std::unique_ptr<IDecorator> decorator)
void Logger::set_default_decorators()
{
- _decorators.emplace_back(support::cpp14::make_unique<StringDecorator>(_name));
- _decorators.emplace_back(support::cpp14::make_unique<DateDecorator>());
- _decorators.emplace_back(support::cpp14::make_unique<LogLevelDecorator>());
+ _decorators.emplace_back(std::make_unique<StringDecorator>(_name));
+ _decorators.emplace_back(std::make_unique<DateDecorator>());
+ _decorators.emplace_back(std::make_unique<LogLevelDecorator>());
}
bool Logger::is_loggable(LogLevel log_level)
diff --git a/src/graph/Graph.cpp b/src/graph/Graph.cpp
index af75eacc02..4ce53589d4 100644
--- a/src/graph/Graph.cpp
+++ b/src/graph/Graph.cpp
@@ -96,7 +96,7 @@ EdgeID Graph::add_connection(NodeID source, size_t source_idx, NodeID sink, size
// Create connections
EdgeID eid = _edges.size();
- auto connection = arm_compute::support::cpp14::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
+ auto connection = std::make_unique<Edge>(eid, source_node.get(), source_idx, sink_node.get(), sink_idx, tensor.get());
_edges.push_back(std::move(connection));
// Add connections to source and sink nodes
@@ -155,7 +155,7 @@ bool Graph::remove_connection(EdgeID eid)
TensorID Graph::create_tensor(const TensorDescriptor &desc)
{
TensorID tid = _tensors.size();
- auto tensor = support::cpp14::make_unique<Tensor>(tid, desc);
+ auto tensor = std::make_unique<Tensor>(tid, desc);
_tensors.push_back(std::move(tensor));
return tid;
diff --git a/src/graph/Utils.cpp b/src/graph/Utils.cpp
index 64890585e0..2835af311a 100644
--- a/src/graph/Utils.cpp
+++ b/src/graph/Utils.cpp
@@ -83,16 +83,16 @@ PassManager create_default_pass_manager(Target target, const GraphConfig &cfg)
// Passes that mutate graph IR
if(cfg.convert_to_uint8)
{
- pm.append(support::cpp14::make_unique<SyntheticDataTypeMutator>(), !is_target_gc);
+ pm.append(std::make_unique<SyntheticDataTypeMutator>(), !is_target_gc);
}
- pm.append(support::cpp14::make_unique<NodeFusionMutator>(), !is_target_gc);
- pm.append(support::cpp14::make_unique<GroupedConvolutionMutator>());
- pm.append(support::cpp14::make_unique<InPlaceOperationMutator>(), !is_target_gc);
+ pm.append(std::make_unique<NodeFusionMutator>(), !is_target_gc);
+ pm.append(std::make_unique<GroupedConvolutionMutator>());
+ pm.append(std::make_unique<InPlaceOperationMutator>(), !is_target_gc);
// Passes that mutate backend information
- pm.append(support::cpp14::make_unique<DepthConcatSubTensorMutator>(), !is_target_gc);
- pm.append(support::cpp14::make_unique<SplitLayerSubTensorMutator>(), !is_target_gc);
- pm.append(support::cpp14::make_unique<NodeExecutionMethodMutator>());
+ pm.append(std::make_unique<DepthConcatSubTensorMutator>(), !is_target_gc);
+ pm.append(std::make_unique<SplitLayerSubTensorMutator>(), !is_target_gc);
+ pm.append(std::make_unique<NodeExecutionMethodMutator>());
return pm;
}
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index b2d58e35be..bc7bbddbd8 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -93,7 +93,7 @@ void CLDeviceBackend::initialize_backend()
// Setup Scheduler
CLScheduler::get().default_init(&_tuner);
// Create allocator with new context
- _allocator = support::cpp14::make_unique<CLBufferAllocator>(nullptr /* legacy path for CLCoreRuntimeContext */);
+ _allocator = std::make_unique<CLBufferAllocator>(nullptr /* legacy path for CLCoreRuntimeContext */);
}
void CLDeviceBackend::release_backend_context(GraphContext &ctx)
@@ -170,7 +170,7 @@ std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tens
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- return support::cpp14::make_unique<CLTensorHandle>(info);
+ return std::make_unique<CLTensorHandle>(info);
}
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
@@ -180,7 +180,7 @@ std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *
return nullptr;
}
- return support::cpp14::make_unique<CLSubTensorHandle>(parent, shape, coords, extend_parent);
+ return std::make_unique<CLSubTensorHandle>(parent, shape, coords, extend_parent);
}
std::unique_ptr<arm_compute::IFunction> CLDeviceBackend::configure_node(INode &node, GraphContext &ctx)
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index 98013b9e49..641dcc36ce 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -143,7 +143,7 @@ std::unique_ptr<IFunction> create_detection_output_layer<CPPDetectionOutputLayer
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<CPPDetectionOutputLayer>();
+ auto func = std::make_unique<CPPDetectionOutputLayer>();
func->configure(input0, input1, input2, output, detect_info);
// Log info
@@ -159,7 +159,7 @@ std::unique_ptr<IFunction> create_detection_output_layer<CPPDetectionOutputLayer
<< " DetectionOutputLayer info: " << detect_info
<< std::endl);
- auto wrap_function = support::cpp14::make_unique<CPPWrapperFunction>();
+ auto wrap_function = std::make_unique<CPPWrapperFunction>();
wrap_function->register_function(std::move(func));
wrap_function->register_tensor(input0);
@@ -193,7 +193,7 @@ std::unique_ptr<IFunction> create_detection_post_process_layer<CPPDetectionPostP
ARM_COMPUTE_ERROR_ON(output3 == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<CPPDetectionPostProcessLayer>();
+ auto func = std::make_unique<CPPDetectionPostProcessLayer>();
func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
// Log info
@@ -212,7 +212,7 @@ std::unique_ptr<IFunction> create_detection_post_process_layer<CPPDetectionPostP
<< " DetectionPostProcessLayer info: " << detect_info
<< std::endl);
- auto wrap_function = support::cpp14::make_unique<CPPWrapperFunction>();
+ auto wrap_function = std::make_unique<CPPWrapperFunction>();
wrap_function->register_function(std::move(func));
wrap_function->register_tensor(input0);
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 252093cf2e..dcab2a5697 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -112,7 +112,7 @@ std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tens
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- return support::cpp14::make_unique<GCTensorHandle>(info);
+ return std::make_unique<GCTensorHandle>(info);
}
std::unique_ptr<ITensorHandle> GCDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index adb87a952b..7f87710cf3 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -123,7 +123,7 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tens
TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type, tensor_desc.quant_info);
info.set_data_layout(tensor_desc.layout);
- return support::cpp14::make_unique<NETensorHandle>(info);
+ return std::make_unique<NETensorHandle>(info);
}
std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
@@ -133,7 +133,7 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *
return nullptr;
}
- return support::cpp14::make_unique<NESubTensorHandle>(parent, shape, coords, extend_parent);
+ return std::make_unique<NESubTensorHandle>(parent, shape, coords, extend_parent);
}
std::unique_ptr<arm_compute::IFunction> NEDeviceBackend::configure_node(INode &node, GraphContext &ctx)
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index ec06f3fa30..d070433e4d 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -102,7 +102,7 @@ std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETa
ARM_COMPUTE_ERROR_ON(output == nullptr);
// Create and configure function
- auto func = support::cpp14::make_unique<NENormalizationLayer>(get_memory_manager(ctx, NETargetInfo::TargetType));
+ auto func = std::make_unique<NENormalizationLayer>(get_memory_manager(ctx, NETargetInfo::TargetType));
func->configure(input, output, norm_info);
// Log info
diff --git a/src/graph/mutators/SyntheticDataTypeMutator.cpp b/src/graph/mutators/SyntheticDataTypeMutator.cpp
index 532c0e821b..21bafa61e1 100644
--- a/src/graph/mutators/SyntheticDataTypeMutator.cpp
+++ b/src/graph/mutators/SyntheticDataTypeMutator.cpp
@@ -222,7 +222,7 @@ void handle_nodes_with_bias(Graph &g)
auto depth = b_desc.shape[get_dimension_idx(b_desc.layout, DataLayoutDimension::BATCHES)];
b_desc.shape = TensorShape(depth);
- auto accessor = support::cpp14::make_unique<EmptyAccessor>();
+ auto accessor = std::make_unique<EmptyAccessor>();
auto b_nid = GraphBuilder::add_const_node(g, params, b_desc, std::move(accessor));
g.add_connection(b_nid, 0, node_id, 2);
}
diff --git a/src/runtime/Allocator.cpp b/src/runtime/Allocator.cpp
index 12478be482..ef7c62d64b 100644
--- a/src/runtime/Allocator.cpp
+++ b/src/runtime/Allocator.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/core/Error.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -44,5 +43,5 @@ void Allocator::free(void *ptr)
std::unique_ptr<IMemoryRegion> Allocator::make_region(size_t size, size_t alignment)
{
- return arm_compute::support::cpp14::make_unique<MemoryRegion>(size, alignment);
+ return std::make_unique<MemoryRegion>(size, alignment);
}
diff --git a/src/runtime/BlobLifetimeManager.cpp b/src/runtime/BlobLifetimeManager.cpp
index 08f46e5012..1c983aa329 100644
--- a/src/runtime/BlobLifetimeManager.cpp
+++ b/src/runtime/BlobLifetimeManager.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/BlobMemoryPool.h"
#include "arm_compute/runtime/IAllocator.h"
#include "arm_compute/runtime/IMemoryGroup.h"
-#include "support/MemorySupport.h"
#include <algorithm>
#include <cmath>
@@ -48,7 +47,7 @@ const BlobLifetimeManager::info_type &BlobLifetimeManager::info() const
std::unique_ptr<IMemoryPool> BlobLifetimeManager::create_pool(IAllocator *allocator)
{
ARM_COMPUTE_ERROR_ON(allocator == nullptr);
- return support::cpp14::make_unique<BlobMemoryPool>(allocator, _blobs);
+ return std::make_unique<BlobMemoryPool>(allocator, _blobs);
}
MappingType BlobLifetimeManager::mapping_type() const
diff --git a/src/runtime/BlobMemoryPool.cpp b/src/runtime/BlobMemoryPool.cpp
index 88bb421e34..e3d7f0fb65 100644
--- a/src/runtime/BlobMemoryPool.cpp
+++ b/src/runtime/BlobMemoryPool.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/IAllocator.h"
#include "arm_compute/runtime/IMemoryPool.h"
#include "arm_compute/runtime/Types.h"
-#include "support/MemorySupport.h"
#include <vector>
@@ -73,7 +72,7 @@ MappingType BlobMemoryPool::mapping_type() const
std::unique_ptr<IMemoryPool> BlobMemoryPool::duplicate()
{
ARM_COMPUTE_ERROR_ON(!_allocator);
- return support::cpp14::make_unique<BlobMemoryPool>(_allocator, _blob_info);
+ return std::make_unique<BlobMemoryPool>(_allocator, _blob_info);
}
void BlobMemoryPool::allocate_blobs(const std::vector<BlobInfo> &blob_info)
diff --git a/src/runtime/CL/CLBufferAllocator.cpp b/src/runtime/CL/CLBufferAllocator.cpp
index 3d380199e5..3673d65111 100644
--- a/src/runtime/CL/CLBufferAllocator.cpp
+++ b/src/runtime/CL/CLBufferAllocator.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLMemoryRegion.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -63,6 +62,6 @@ void CLBufferAllocator::free(void *ptr)
std::unique_ptr<IMemoryRegion> CLBufferAllocator::make_region(size_t size, size_t alignment)
{
ARM_COMPUTE_UNUSED(alignment);
- return arm_compute::support::cpp14::make_unique<CLBufferMemoryRegion>(_ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
+ return std::make_unique<CLBufferMemoryRegion>(_ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
}
} // namespace arm_compute
diff --git a/src/runtime/CL/CLRuntimeContext.cpp b/src/runtime/CL/CLRuntimeContext.cpp
index 571e30931c..9d46126ee4 100644
--- a/src/runtime/CL/CLRuntimeContext.cpp
+++ b/src/runtime/CL/CLRuntimeContext.cpp
@@ -26,12 +26,10 @@
#include "arm_compute/runtime/CL/CLHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
CLRuntimeContext::CLRuntimeContext()
- : _gpu_owned_scheduler(support::cpp14::make_unique<CLScheduler>()), _gpu_scheduler(_gpu_owned_scheduler.get()), _symbols(), _core_context()
+ : _gpu_owned_scheduler(std::make_unique<CLScheduler>()), _gpu_scheduler(_gpu_owned_scheduler.get()), _symbols(), _core_context()
{
_symbols.load_default();
auto ctx_dev_err = create_opencl_context_and_device();
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index f37fc779fe..fc789fa4b9 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -28,8 +28,6 @@
#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
@@ -47,20 +45,20 @@ namespace
std::unique_ptr<ICLMemoryRegion> allocate_region(CLCoreRuntimeContext *ctx, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
- std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(ctx,
- CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
- size,
- alignment);
+ std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(ctx,
+ CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
+ size,
+ alignment);
// Try coarse-grain SVM in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = support::cpp14::make_unique<CLCoarseSVMMemoryRegion>(ctx, CL_MEM_READ_WRITE, size, alignment);
+ region = std::make_unique<CLCoarseSVMMemoryRegion>(ctx, CL_MEM_READ_WRITE, size, alignment);
}
// Try legacy buffer memory in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = support::cpp14::make_unique<CLBufferMemoryRegion>(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
+ region = std::make_unique<CLBufferMemoryRegion>(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
}
return region;
}
@@ -176,11 +174,11 @@ Status CLTensorAllocator::import_memory(cl::Buffer buffer)
if(_ctx == nullptr)
{
auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
- _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer, &legacy_ctx));
+ _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, &legacy_ctx));
}
else
{
- _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer, _ctx->core_runtime_context()));
+ _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, _ctx->core_runtime_context()));
}
info().set_is_resizable(false);
diff --git a/src/runtime/CL/ICLSimpleFunction.cpp b/src/runtime/CL/ICLSimpleFunction.cpp
index b075aa17e3..4530537789 100644
--- a/src/runtime/CL/ICLSimpleFunction.cpp
+++ b/src/runtime/CL/ICLSimpleFunction.cpp
@@ -28,13 +28,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/ICLKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
ICLSimpleFunction::ICLSimpleFunction(CLRuntimeContext *ctx) // NOLINT
: _kernel(),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>()),
_ctx(ctx)
{
}
diff --git a/src/runtime/CL/functions/CLAbsoluteDifference.cpp b/src/runtime/CL/functions/CLAbsoluteDifference.cpp
index b7f40a516c..ff5b0a864d 100644
--- a/src/runtime/CL/functions/CLAbsoluteDifference.cpp
+++ b/src/runtime/CL/functions/CLAbsoluteDifference.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLAbsoluteDifference.h"
#include "src/core/CL/kernels/CLAbsoluteDifferenceKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLAbsoluteDifference::configure(const ICLTensor *input1, const ICLTensor *i
void CLAbsoluteDifference::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLAbsoluteDifferenceKernel>();
+ auto k = std::make_unique<CLAbsoluteDifferenceKernel>();
k->configure(compile_context, input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLAccumulate.cpp b/src/runtime/CL/functions/CLAccumulate.cpp
index 742de64e34..44020fd816 100644
--- a/src/runtime/CL/functions/CLAccumulate.cpp
+++ b/src/runtime/CL/functions/CLAccumulate.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLAccumulate.h"
#include "src/core/CL/kernels/CLAccumulateKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLAccumulate::configure(const ICLTensor *input, ICLTensor *accum)
void CLAccumulate::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *accum)
{
- auto k = arm_compute::support::cpp14::make_unique<CLAccumulateKernel>();
+ auto k = std::make_unique<CLAccumulateKernel>();
k->configure(compile_context, input, accum);
_kernel = std::move(k);
}
@@ -49,7 +48,7 @@ void CLAccumulateWeighted::configure(const ICLTensor *input, float alpha, ICLTen
void CLAccumulateWeighted::configure(const CLCompileContext &compile_context, const ICLTensor *input, float alpha, ICLTensor *accum)
{
- auto k = arm_compute::support::cpp14::make_unique<CLAccumulateWeightedKernel>();
+ auto k = std::make_unique<CLAccumulateWeightedKernel>();
k->configure(compile_context, input, alpha, accum);
_kernel = std::move(k);
}
@@ -61,7 +60,7 @@ void CLAccumulateSquared::configure(const ICLTensor *input, uint32_t shift, ICLT
void CLAccumulateSquared::configure(const CLCompileContext &compile_context, const ICLTensor *input, uint32_t shift, ICLTensor *accum)
{
- auto k = arm_compute::support::cpp14::make_unique<CLAccumulateSquaredKernel>();
+ auto k = std::make_unique<CLAccumulateSquaredKernel>();
k->configure(compile_context, input, shift, accum);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLActivationLayer.cpp b/src/runtime/CL/functions/CLActivationLayer.cpp
index 61c82b33eb..0070e43f8c 100644
--- a/src/runtime/CL/functions/CLActivationLayer.cpp
+++ b/src/runtime/CL/functions/CLActivationLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLRuntimeContext.h"
#include "src/core/CL/kernels/CLActivationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -35,7 +34,7 @@ namespace experimental
{
void CLActivation::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *output, ActivationLayerInfo act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLActivationLayerKernel>();
+ auto k = std::make_unique<CLActivationLayerKernel>();
k->configure(compile_context, input, output, act_info);
_kernel = std::move(k);
}
@@ -55,7 +54,7 @@ struct CLActivationLayer::Impl
};
CLActivationLayer::CLActivationLayer(CLRuntimeContext *ctx)
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
_impl->ctx = ctx;
}
@@ -78,7 +77,7 @@ void CLActivationLayer::configure(const CLCompileContext &compile_context, ICLTe
_impl->src = input;
_impl->dst = output == nullptr ? input : output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLActivation>();
+ _impl->op = std::make_unique<experimental::CLActivation>();
_impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), act_info);
}
diff --git a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
index 5fc849e3c5..8c32563abb 100644
--- a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
+++ b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
@@ -33,7 +33,6 @@
#include "src/core/CL/kernels/CLArgMinMaxLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/Utils.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -132,7 +131,7 @@ void CLArgMinMaxLayer::configure(const CLCompileContext &compile_context, const
auto add_reduction_kernel = [this, &compile_context, axis, op](const ICLTensor * input, const ICLTensor * prev_output, ICLTensor * output)
{
- _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLArgMinMaxLayerKernel>());
+ _reduction_kernels_vector.emplace_back(std::make_unique<CLArgMinMaxLayerKernel>());
_reduction_kernels_vector.back()->configure(compile_context, input, prev_output, output, axis, op);
};
diff --git a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
index 77eed1140f..6b76da81c6 100644
--- a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
@@ -29,14 +29,13 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
#include "src/core/CL/kernels/CLBatchNormalizationLayerKernel.h"
namespace arm_compute
{
CLBatchNormalizationLayer::CLBatchNormalizationLayer()
- : _norm_kernel(support::cpp14::make_unique<CLBatchNormalizationLayerKernel>())
+ : _norm_kernel(std::make_unique<CLBatchNormalizationLayerKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp b/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
index e0a2c430ed..c2fdb74777 100644
--- a/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
@@ -31,12 +31,11 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLBatchToSpaceLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLBatchToSpaceLayer::CLBatchToSpaceLayer()
- : _batch_to_space_kernel(support::cpp14::make_unique<CLBatchToSpaceLayerKernel>())
+ : _batch_to_space_kernel(std::make_unique<CLBatchToSpaceLayerKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLBitwiseAnd.cpp b/src/runtime/CL/functions/CLBitwiseAnd.cpp
index cfcd63f170..0f9f68cb9c 100644
--- a/src/runtime/CL/functions/CLBitwiseAnd.cpp
+++ b/src/runtime/CL/functions/CLBitwiseAnd.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLBitwiseAnd.h"
#include "src/core/CL/kernels/CLBitwiseAndKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLBitwiseAnd::configure(const ICLTensor *input1, const ICLTensor *input2, I
void CLBitwiseAnd::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLBitwiseAndKernel>();
+ auto k = std::make_unique<CLBitwiseAndKernel>();
k->configure(compile_context, input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLBitwiseNot.cpp b/src/runtime/CL/functions/CLBitwiseNot.cpp
index 588c793f6a..cd2384590e 100644
--- a/src/runtime/CL/functions/CLBitwiseNot.cpp
+++ b/src/runtime/CL/functions/CLBitwiseNot.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLBitwiseNot.h"
#include "src/core/CL/kernels/CLBitwiseNotKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLBitwiseNot::configure(const ICLTensor *input, ICLTensor *output)
void CLBitwiseNot::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLBitwiseNotKernel>();
+ auto k = std::make_unique<CLBitwiseNotKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLBitwiseOr.cpp b/src/runtime/CL/functions/CLBitwiseOr.cpp
index 3a5de193a3..38db5f78a0 100644
--- a/src/runtime/CL/functions/CLBitwiseOr.cpp
+++ b/src/runtime/CL/functions/CLBitwiseOr.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLBitwiseOr.h"
#include "src/core/CL/kernels/CLBitwiseOrKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLBitwiseOr::configure(const ICLTensor *input1, const ICLTensor *input2, IC
void CLBitwiseOr::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLBitwiseOrKernel>();
+ auto k = std::make_unique<CLBitwiseOrKernel>();
k->configure(compile_context, input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLBitwiseXor.cpp b/src/runtime/CL/functions/CLBitwiseXor.cpp
index 62aeaaa31f..e477c3b847 100644
--- a/src/runtime/CL/functions/CLBitwiseXor.cpp
+++ b/src/runtime/CL/functions/CLBitwiseXor.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLBitwiseXor.h"
#include "src/core/CL/kernels/CLBitwiseXorKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLBitwiseXor::configure(const ICLTensor *input1, const ICLTensor *input2, I
void CLBitwiseXor::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLBitwiseXorKernel>();
+ auto k = std::make_unique<CLBitwiseXorKernel>();
k->configure(compile_context, input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLBoundingBoxTransform.cpp b/src/runtime/CL/functions/CLBoundingBoxTransform.cpp
index 600d36290c..0dade0a369 100644
--- a/src/runtime/CL/functions/CLBoundingBoxTransform.cpp
+++ b/src/runtime/CL/functions/CLBoundingBoxTransform.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLBoundingBoxTransform.h"
#include "src/core/CL/kernels/CLBoundingBoxTransformKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLBoundingBoxTransform::configure(const ICLTensor *boxes, ICLTensor *pred_b
void CLBoundingBoxTransform::configure(const CLCompileContext &compile_context, const ICLTensor *boxes, ICLTensor *pred_boxes, const ICLTensor *deltas, const BoundingBoxTransformInfo &info)
{
// Configure Bounding Box kernel
- auto k = arm_compute::support::cpp14::make_unique<CLBoundingBoxTransformKernel>();
+ auto k = std::make_unique<CLBoundingBoxTransformKernel>();
k->configure(compile_context, boxes, pred_boxes, deltas, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLBox3x3.cpp b/src/runtime/CL/functions/CLBox3x3.cpp
index be40f25055..09e24d1bc0 100644
--- a/src/runtime/CL/functions/CLBox3x3.cpp
+++ b/src/runtime/CL/functions/CLBox3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLBox3x3Kernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLBox3x3::configure(ICLTensor *input, ICLTensor *output, BorderMode border_
void CLBox3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLBox3x3Kernel>();
+ auto k = std::make_unique<CLBox3x3Kernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, BorderSize(1), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLCannyEdge.cpp b/src/runtime/CL/functions/CLCannyEdge.cpp
index 5a32564d2d..7e99a1bbb3 100644
--- a/src/runtime/CL/functions/CLCannyEdge.cpp
+++ b/src/runtime/CL/functions/CLCannyEdge.cpp
@@ -35,17 +35,16 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSobel5x5Kernel.h"
#include "src/core/CL/kernels/CLSobel7x7Kernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLCannyEdge::CLCannyEdge(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
: _memory_group(std::move(memory_manager)),
_sobel(),
- _gradient(support::cpp14::make_unique<CLGradientKernel>()),
- _border_mag_gradient(support::cpp14::make_unique<CLFillBorderKernel>()),
- _non_max_suppr(support::cpp14::make_unique<CLEdgeNonMaxSuppressionKernel>()),
- _edge_trace(support::cpp14::make_unique<CLEdgeTraceKernel>()),
+ _gradient(std::make_unique<CLGradientKernel>()),
+ _border_mag_gradient(std::make_unique<CLFillBorderKernel>()),
+ _non_max_suppr(std::make_unique<CLEdgeNonMaxSuppressionKernel>()),
+ _edge_trace(std::make_unique<CLEdgeTraceKernel>()),
_gx(),
_gy(),
_mag(),
@@ -123,19 +122,19 @@ void CLCannyEdge::configure(const CLCompileContext &compile_context, ICLTensor *
// Configure/Init sobelNxN
if(gradient_size == 3)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel3x3>();
+ auto k = std::make_unique<CLSobel3x3>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
else if(gradient_size == 5)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel5x5>();
+ auto k = std::make_unique<CLSobel5x5>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
else if(gradient_size == 7)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel7x7>();
+ auto k = std::make_unique<CLSobel7x7>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLCast.cpp b/src/runtime/CL/functions/CLCast.cpp
index 2a28e06845..202140d8b9 100644
--- a/src/runtime/CL/functions/CLCast.cpp
+++ b/src/runtime/CL/functions/CLCast.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLCast.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLCast::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy
void CLCast::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDepthConvertLayerKernel>();
+ auto k = std::make_unique<CLDepthConvertLayerKernel>();
k->configure(compile_context, input, output, policy, 0);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLChannelCombine.cpp b/src/runtime/CL/functions/CLChannelCombine.cpp
index e93aea31f4..543de9c653 100644
--- a/src/runtime/CL/functions/CLChannelCombine.cpp
+++ b/src/runtime/CL/functions/CLChannelCombine.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLChannelCombine.h"
#include "src/core/CL/kernels/CLChannelCombineKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLChannelCombine::configure(const ICLTensor *plane0, const ICLTensor *plane
void CLChannelCombine::configure(const CLCompileContext &compile_context, const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLChannelCombineKernel>();
+ auto k = std::make_unique<CLChannelCombineKernel>();
k->configure(compile_context, plane0, plane1, plane2, plane3, output);
_kernel = std::move(k);
}
@@ -49,7 +48,7 @@ void CLChannelCombine::configure(const ICLImage *plane0, const ICLImage *plane1,
void CLChannelCombine::configure(const CLCompileContext &compile_context, const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLChannelCombineKernel>();
+ auto k = std::make_unique<CLChannelCombineKernel>();
k->configure(compile_context, plane0, plane1, plane2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLChannelExtract.cpp b/src/runtime/CL/functions/CLChannelExtract.cpp
index 8b4a3f7458..645fc051cb 100644
--- a/src/runtime/CL/functions/CLChannelExtract.cpp
+++ b/src/runtime/CL/functions/CLChannelExtract.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLChannelExtract.h"
#include "src/core/CL/kernels/CLChannelExtractKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLChannelExtract::configure(const ICLTensor *input, Channel channel, ICLTen
void CLChannelExtract::configure(const CLCompileContext &compile_context, const ICLTensor *input, Channel channel, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLChannelExtractKernel>();
+ auto k = std::make_unique<CLChannelExtractKernel>();
k->configure(compile_context, input, channel, output);
_kernel = std::move(k);
}
@@ -49,7 +48,7 @@ void CLChannelExtract::configure(const ICLMultiImage *input, Channel channel, IC
void CLChannelExtract::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, Channel channel, ICLImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLChannelExtractKernel>();
+ auto k = std::make_unique<CLChannelExtractKernel>();
k->configure(compile_context, input, channel, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLChannelShuffleLayer.cpp b/src/runtime/CL/functions/CLChannelShuffleLayer.cpp
index c443df3b37..c6af5a05d5 100644
--- a/src/runtime/CL/functions/CLChannelShuffleLayer.cpp
+++ b/src/runtime/CL/functions/CLChannelShuffleLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLChannelShuffleLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLChannelShuffleLayer::configure(const ICLTensor *input, ICLTensor *output,
void CLChannelShuffleLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, unsigned int num_groups)
{
- auto k = arm_compute::support::cpp14::make_unique<CLChannelShuffleLayerKernel>();
+ auto k = std::make_unique<CLChannelShuffleLayerKernel>();
k->configure(compile_context, input, output, num_groups);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLColorConvert.cpp b/src/runtime/CL/functions/CLColorConvert.cpp
index 95f4257929..9aeeb65dc4 100644
--- a/src/runtime/CL/functions/CLColorConvert.cpp
+++ b/src/runtime/CL/functions/CLColorConvert.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLColorConvert.h"
#include "src/core/CL/kernels/CLColorConvertKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLColorConvert::configure(const ICLTensor *input, ICLTensor *output)
void CLColorConvert::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLColorConvertKernel>();
+ auto k = std::make_unique<CLColorConvertKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
@@ -49,7 +48,7 @@ void CLColorConvert::configure(const ICLImage *input, ICLMultiImage *output)
void CLColorConvert::configure(const CLCompileContext &compile_context, const ICLImage *input, ICLMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLColorConvertKernel>();
+ auto k = std::make_unique<CLColorConvertKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
@@ -61,7 +60,7 @@ void CLColorConvert::configure(const ICLMultiImage *input, ICLImage *output)
void CLColorConvert::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLColorConvertKernel>();
+ auto k = std::make_unique<CLColorConvertKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
@@ -73,7 +72,7 @@ void CLColorConvert::configure(const ICLMultiImage *input, ICLMultiImage *output
void CLColorConvert::configure(const CLCompileContext &compile_context, const ICLMultiImage *input, ICLMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLColorConvertKernel>();
+ auto k = std::make_unique<CLColorConvertKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLComparison.cpp b/src/runtime/CL/functions/CLComparison.cpp
index 9b5840aa95..4122928578 100644
--- a/src/runtime/CL/functions/CLComparison.cpp
+++ b/src/runtime/CL/functions/CLComparison.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLComparisonKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ void CLComparison::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *ou
void CLComparison::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ComparisonOperation operation)
{
- auto k = arm_compute::support::cpp14::make_unique<CLComparisonKernel>();
+ auto k = std::make_unique<CLComparisonKernel>();
k->configure(compile_context, input1, input2, output, operation);
_kernel = std::move(k);
@@ -67,7 +66,7 @@ void CLComparisonStatic<COP>::configure(ICLTensor *input1, ICLTensor *input2, IC
template <ComparisonOperation COP>
void CLComparisonStatic<COP>::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLComparisonKernel>();
+ auto k = std::make_unique<CLComparisonKernel>();
k->configure(compile_context, input1, input2, output, COP);
_kernel = std::move(k);
diff --git a/src/runtime/CL/functions/CLComputeAllAnchors.cpp b/src/runtime/CL/functions/CLComputeAllAnchors.cpp
index 2cae0ee455..5838e32ed8 100644
--- a/src/runtime/CL/functions/CLComputeAllAnchors.cpp
+++ b/src/runtime/CL/functions/CLComputeAllAnchors.cpp
@@ -24,8 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLComputeAllAnchors.h"
#include "src/core/CL/kernels/CLGenerateProposalsLayerKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void CLComputeAllAnchors::configure(const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
@@ -36,7 +34,7 @@ void CLComputeAllAnchors::configure(const ICLTensor *anchors, ICLTensor *all_anc
void CLComputeAllAnchors::configure(const CLCompileContext &compile_context, const ICLTensor *anchors, ICLTensor *all_anchors, const ComputeAnchorsInfo &info)
{
// Configure ComputeAllAnchors kernel
- auto k = arm_compute::support::cpp14::make_unique<CLComputeAllAnchorsKernel>();
+ auto k = std::make_unique<CLComputeAllAnchorsKernel>();
k->configure(compile_context, anchors, all_anchors, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index 54f71f9765..0c473a79c8 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -37,7 +37,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLBatchConcatenateLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -78,7 +77,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
case 2:
{
// Configure WidthConcatenate2Tensors kernel
- auto kernel = support::cpp14::make_unique<CLWidthConcatenate2TensorsKernel>();
+ auto kernel = std::make_unique<CLWidthConcatenate2TensorsKernel>();
kernel->configure(compile_context, inputs_vector.at(0), inputs_vector.at(1), output);
_concat_kernels.emplace_back(std::move(kernel));
break;
@@ -86,7 +85,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
case 4:
{
// Configure WidthConcatenate4Tensors kernel
- auto kernel = support::cpp14::make_unique<CLWidthConcatenate4TensorsKernel>();
+ auto kernel = std::make_unique<CLWidthConcatenate4TensorsKernel>();
kernel->configure(compile_context, inputs_vector.at(0), inputs_vector.at(1), inputs_vector.at(2), inputs_vector.at(3), output);
_concat_kernels.emplace_back(std::move(kernel));
break;
@@ -96,7 +95,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
// Configure generic case WidthConcatenate kernels
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- auto kernel = support::cpp14::make_unique<CLWidthConcatenateLayerKernel>();
+ auto kernel = std::make_unique<CLWidthConcatenateLayerKernel>();
kernel->configure(compile_context, inputs_vector.at(i), offset, output);
offset += inputs_vector.at(i)->dimension(_axis);
_concat_kernels.emplace_back(std::move(kernel));
@@ -110,7 +109,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
{
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- auto kernel = support::cpp14::make_unique<CLHeightConcatenateLayerKernel>();
+ auto kernel = std::make_unique<CLHeightConcatenateLayerKernel>();
kernel->configure(compile_context, inputs_vector.at(i), offset, output);
offset += inputs_vector.at(i)->dimension(_axis);
_concat_kernels.emplace_back(std::move(kernel));
@@ -121,7 +120,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
{
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- auto kernel = support::cpp14::make_unique<CLDepthConcatenateLayerKernel>();
+ auto kernel = std::make_unique<CLDepthConcatenateLayerKernel>();
kernel->configure(compile_context, inputs_vector.at(i), offset, output);
offset += inputs_vector.at(i)->dimension(_axis);
_concat_kernels.emplace_back(std::move(kernel));
@@ -132,7 +131,7 @@ void CLConcatenation::configure(const CLCompileContext &compile_context, const s
{
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- auto kernel = support::cpp14::make_unique<CLBatchConcatenateLayerKernel>();
+ auto kernel = std::make_unique<CLBatchConcatenateLayerKernel>();
kernel->configure(compile_context, inputs_vector.at(i), offset, output);
offset += inputs_vector.at(i)->dimension(_axis);
_concat_kernels.emplace_back(std::move(kernel));
@@ -263,7 +262,7 @@ struct CLConcatenateLayer::Impl
};
CLConcatenateLayer::CLConcatenateLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -286,7 +285,7 @@ void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std:
_impl->dst = output;
_impl->axis = axis;
_impl->num_inputs = inputs_vector.size();
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLConcatenation>();
+ _impl->op = std::make_unique<experimental::CLConcatenation>();
std::vector<ITensorInfo *> inputs_vector_info;
for(unsigned int i = 0; i < inputs_vector.size(); ++i)
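CLConcatenateLayer above also shows the second pattern in this patch: the public function keeps a heap-allocated Impl holding the tensor pointers and an experimental operator that is configured on tensor metadata. A reduced sketch of that split follows, with every name a placeholder rather than library API:

// Sketch of the Impl/operator split visible in CLConcatenateLayer above.
// Tensor, ExperimentalOp and RuntimeFunction are illustrative placeholders.
#include <memory>

struct Tensor { int info = 0; };

struct ExperimentalOp
{
    void configure(int src_info, int dst_info) { (void)src_info; (void)dst_info; }
};

class RuntimeFunction
{
    struct Impl
    {
        const Tensor                   *src{ nullptr };
        Tensor                         *dst{ nullptr };
        std::unique_ptr<ExperimentalOp> op{ nullptr };
    };

public:
    RuntimeFunction() : _impl(std::make_unique<Impl>()) {}

    void configure(const Tensor *src, Tensor *dst)
    {
        _impl->src = src;
        _impl->dst = dst;
        _impl->op  = std::make_unique<ExperimentalOp>();
        _impl->op->configure(src->info, dst->info); // the operator is configured on metadata only
    }

private:
    std::unique_ptr<Impl> _impl;
};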
diff --git a/src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp b/src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp
index 8ecc114343..bbe9b487e5 100644
--- a/src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp
+++ b/src/runtime/CL/functions/CLConvertFullyConnectedWeights.cpp
@@ -25,8 +25,6 @@
#include "src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void CLConvertFullyConnectedWeights::configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
@@ -38,7 +36,7 @@ void CLConvertFullyConnectedWeights::configure(const ICLTensor *input, ICLTensor
void CLConvertFullyConnectedWeights::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape,
DataLayout data_layout)
{
- auto k = arm_compute::support::cpp14::make_unique<CLConvertFullyConnectedWeightsKernel>();
+ auto k = std::make_unique<CLConvertFullyConnectedWeightsKernel>();
k->configure(compile_context, input, output, original_input_shape, data_layout);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLConvolution.cpp b/src/runtime/CL/functions/CLConvolution.cpp
index 1ad32d309c..49dae49146 100644
--- a/src/runtime/CL/functions/CLConvolution.cpp
+++ b/src/runtime/CL/functions/CLConvolution.cpp
@@ -33,7 +33,6 @@
#include "arm_compute/runtime/ITensorAllocator.h"
#include "src/core/CL/kernels/CLConvolutionKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -47,7 +46,7 @@ void CLConvolution3x3::configure(ICLTensor *input, ICLTensor *output, const int1
void CLConvolution3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t scale, BorderMode border_mode,
uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLConvolution3x3Kernel>();
+ auto k = std::make_unique<CLConvolution3x3Kernel>();
k->configure(compile_context, input, output, conv, scale, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
@@ -55,9 +54,8 @@ void CLConvolution3x3::configure(const CLCompileContext &compile_context, ICLTen
template <unsigned int matrix_size>
CLConvolutionSquare<matrix_size>::CLConvolutionSquare(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _tmp(), _is_separable(false), _kernel_hor(support::cpp14::make_unique<CLSeparableConvolutionHorKernel<matrix_size>>()),
- _kernel_vert(support::cpp14::make_unique<CLSeparableConvolutionVertKernel<matrix_size>>()), _kernel(support::cpp14::make_unique<CLConvolutionKernel<matrix_size>>()),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>())
+ : _memory_group(std::move(memory_manager)), _tmp(), _is_separable(false), _kernel_hor(std::make_unique<CLSeparableConvolutionHorKernel<matrix_size>>()),
+ _kernel_vert(std::make_unique<CLSeparableConvolutionVertKernel<matrix_size>>()), _kernel(std::make_unique<CLConvolutionKernel<matrix_size>>()), _border_handler(std::make_unique<CLFillBorderKernel>())
{
}
@@ -138,7 +136,7 @@ void CLConvolutionRectangle::configure(ICLTensor *input, ICLTensor *output, cons
void CLConvolutionRectangle::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const int16_t *conv, uint32_t rows, uint32_t cols, uint32_t scale,
BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLConvolutionRectangleKernel>();
+ auto k = std::make_unique<CLConvolutionRectangleKernel>();
k->configure(compile_context, input, output, conv, rows, cols, scale, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index e214bdf0f2..edd9298d26 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <memory>
@@ -66,7 +65,7 @@ void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLT
case ConvolutionMethod::WINOGRAD:
{
ARM_COMPUTE_ERROR_ON(num_groups != 1);
- auto f = arm_compute::support::cpp14::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<CLWinogradConvolutionLayer>(_memory_manager);
f->configure(compile_context, input, weights, biases, output, conv_info, act_info, enable_fast_math);
_function = std::move(f);
break;
@@ -74,21 +73,21 @@ void CLConvolutionLayer::configure(const CLCompileContext &compile_context, ICLT
case ConvolutionMethod::DIRECT:
{
ARM_COMPUTE_ERROR_ON(num_groups != 1);
- auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
+ auto f = std::make_unique<CLDirectConvolutionLayer>();
f->configure(compile_context, input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
}
case ConvolutionMethod::GEMM:
{
- auto f = arm_compute::support::cpp14::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<CLGEMMConvolutionLayer>(_memory_manager);
f->configure(compile_context, input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
_function = std::move(f);
break;
}
case ConvolutionMethod::FFT:
{
- auto f = arm_compute::support::cpp14::make_unique<CLFFTConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<CLFFTConvolutionLayer>(_memory_manager);
f->configure(compile_context, input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
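CLConvolutionLayer::configure above selects a concrete implementation (Winograd, direct, GEMM or FFT) and stores it behind a common _function pointer. Stripped to its essentials, the dispatch looks roughly like the sketch below; the enum and implementation names are invented for illustration:

// Sketch: pick a concrete implementation from an enum and keep it behind a common interface.
#include <memory>

enum class Method { Direct, Gemm };

struct IFunction { virtual ~IFunction() = default; virtual void run() = 0; };
struct DirectImpl : IFunction { void run() override {} };
struct GemmImpl   : IFunction { void run() override {} };

std::unique_ptr<IFunction> make_function(Method m)
{
    switch(m)
    {
        case Method::Direct: return std::make_unique<DirectImpl>();
        case Method::Gemm:   return std::make_unique<GemmImpl>();
        default:             return nullptr;
    }
}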
diff --git a/src/runtime/CL/functions/CLCopy.cpp b/src/runtime/CL/functions/CLCopy.cpp
index f7b016a779..c3e30ada6e 100644
--- a/src/runtime/CL/functions/CLCopy.cpp
+++ b/src/runtime/CL/functions/CLCopy.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CL/kernels/CLCopyKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -42,7 +41,7 @@ void CLCopy::configure(ICLTensor *input, ICLTensor *output)
void CLCopy::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLCopyKernel>();
+ auto k = std::make_unique<CLCopyKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLCropResize.cpp b/src/runtime/CL/functions/CLCropResize.cpp
index 4aaa674c5c..ed31446cf9 100644
--- a/src/runtime/CL/functions/CLCropResize.cpp
+++ b/src/runtime/CL/functions/CLCropResize.cpp
@@ -32,8 +32,6 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "support/MemorySupport.h"
-
#include <cstddef>
namespace arm_compute
@@ -126,13 +124,13 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
_box_ind->map(CLScheduler::get().queue());
for(unsigned int num_box = 0; num_box < _num_boxes; ++num_box)
{
- auto crop_tensor = support::cpp14::make_unique<CLTensor>();
+ auto crop_tensor = std::make_unique<CLTensor>();
TensorInfo crop_result_info(1, DataType::F32);
crop_result_info.set_data_layout(DataLayout::NHWC);
crop_tensor->allocator()->init(crop_result_info);
_crop_results.emplace_back(std::move(crop_tensor));
- auto scale_tensor = support::cpp14::make_unique<CLTensor>();
+ auto scale_tensor = std::make_unique<CLTensor>();
TensorInfo scaled_result_info(out_shape, 1, DataType::F32);
scaled_result_info.set_data_layout(DataLayout::NHWC);
scale_tensor->allocator()->init(scaled_result_info);
@@ -144,14 +142,14 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
Coordinates end{};
configure_crop(_input, _boxes, _box_ind, _crop_results[num_box].get(), num_box, start, end, batch_index);
- auto scale_kernel = support::cpp14::make_unique<CLScale>();
+ auto scale_kernel = std::make_unique<CLScale>();
scale_kernel->configure(compile_context, _crop_results[num_box].get(), _scaled_results[num_box].get(), ScaleKernelInfo{ _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT });
_scale.emplace_back(std::move(scale_kernel));
Window win = calculate_max_window(*_output->info());
win.set(3, Window::Dimension(num_box, num_box + 1, 1));
- auto copy_kernel = support::cpp14::make_unique<CLCopyKernel>();
+ auto copy_kernel = std::make_unique<CLCopyKernel>();
copy_kernel->configure(compile_context, _scaled_results[num_box].get(), _output, &win);
_copy.emplace_back(std::move(copy_kernel));
@@ -209,7 +207,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_rows_before(full_window);
slice_fill_rows_before.set(2, Window::Dimension(0, rows_out_of_bounds[0], 1));
- auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLMemsetKernel>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_rows_before);
_internal_kernels.push_back(std::move(kernel));
}
@@ -226,7 +224,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_cols_before(slice_in);
slice_fill_cols_before.set(1, Window::Dimension(0, cols_out_of_bounds[0], 1));
- auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLMemsetKernel>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_cols_before);
_internal_kernels.push_back(std::move(kernel));
}
@@ -235,7 +233,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_cols_after(slice_in);
slice_fill_cols_after.set(1, Window::Dimension(_crop_results[num_box].get()->info()->dimension(1) - cols_out_of_bounds[1], _crop_results[num_box].get()->info()->dimension(1), 1));
- auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLMemsetKernel>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_cols_after);
_internal_kernels.push_back(std::move(kernel));
}
@@ -248,7 +246,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
is_height_flipped ? start[1] - rows_out_of_bounds[0] : start[1] + rows_out_of_bounds[0] };
Coordinates2D end_in{ is_width_flipped ? start_in.x - cols_in_bounds + 1 : start_in.x + cols_in_bounds - 1,
is_height_flipped ? start_in.y - rows_in_bounds + 1 : start_in.y + rows_in_bounds - 1 };
- auto kernel = arm_compute::support::cpp14::make_unique<CLCropKernel>();
+ auto kernel = std::make_unique<CLCropKernel>();
kernel->configure(compile_context, _input, _crop_results[num_box].get(), start_in, end_in, batch_index, extrapolation_value, &slice_in);
_internal_kernels.push_back(std::move(kernel));
@@ -260,7 +258,7 @@ void CLCropResize::configure(const CLCompileContext &compile_context, const ICLT
{
Window slice_fill_rows_after(full_window);
slice_fill_rows_after.set(2, Window::Dimension(_crop_results[num_box].get()->info()->dimension(2) - rows_out_of_bounds[1], _crop_results[num_box].get()->info()->dimension(2), 1));
- auto kernel = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ auto kernel = std::make_unique<CLMemsetKernel>();
kernel->configure(compile_context, _crop_results[num_box].get(), extrapolation_value, &slice_fill_rows_after);
_internal_kernels.push_back(std::move(kernel));
}
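CLCropResize differs from the simple wrappers in that it configures several kernels per box (crop, scale, memset for out-of-bounds rows and columns) and keeps them alive in member vectors. A toy version of that ownership pattern follows, using made-up kernel names:

// Sketch: configure one kernel per work item and keep ownership in a vector.
// KernelBase and FillKernel are illustrative placeholders.
#include <memory>
#include <vector>

struct KernelBase { virtual ~KernelBase() = default; };
struct FillKernel : KernelBase
{
    void configure(float value, int box_index) { (void)value; (void)box_index; }
};

int main()
{
    std::vector<std::unique_ptr<KernelBase>> internal_kernels;
    for(int box = 0; box < 4; ++box)
    {
        auto kernel = std::make_unique<FillKernel>();
        kernel->configure(0.0f, box);                 // one configured kernel per box
        internal_kernels.push_back(std::move(kernel)); // vector keeps them alive for run()
    }
    return static_cast<int>(internal_kernels.size());
}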
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 6fe231ea6c..75f34cc5ee 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <memory>
@@ -57,14 +56,14 @@ void CLDeconvolutionLayer::configure(const CLCompileContext &compile_context, IC
{
case DeconvolutionMethod::DIRECT:
{
- auto f = arm_compute::support::cpp14::make_unique<CLDirectDeconvolutionLayer>();
+ auto f = std::make_unique<CLDirectDeconvolutionLayer>();
f->configure(compile_context, input, weights, bias, output, deconv_info, weights_info);
_function = std::move(f);
break;
}
case DeconvolutionMethod::GEMM:
{
- auto f = arm_compute::support::cpp14::make_unique<CLGEMMDeconvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<CLGEMMDeconvolutionLayer>(_memory_manager);
f->configure(compile_context, input, weights, bias, output, deconv_info);
_function = std::move(f);
break;
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
index 0cf2ea623f..4989f6460d 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayerUpsample.cpp
@@ -29,13 +29,12 @@
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
#include "src/core/CL/kernels/CLMemsetKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLDeconvolutionLayerUpsample::CLDeconvolutionLayerUpsample() // NOLINT
- : _upsample(support::cpp14::make_unique<CLDeconvolutionLayerUpsampleKernel>()),
- _memset(support::cpp14::make_unique<CLMemsetKernel>()),
+ : _upsample(std::make_unique<CLDeconvolutionLayerUpsampleKernel>()),
+ _memset(std::make_unique<CLMemsetKernel>()),
_output(nullptr)
{
}
diff --git a/src/runtime/CL/functions/CLDepthConvertLayer.cpp b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
index e58c0e5f4c..47bc52364d 100644
--- a/src/runtime/CL/functions/CLDepthConvertLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLDepthConvertLayer::configure(const ICLTensor *input, ICLTensor *output, C
void CLDepthConvertLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDepthConvertLayerKernel>();
+ auto k = std::make_unique<CLDepthConvertLayerKernel>();
k->configure(compile_context, input, output, policy, shift);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLDepthToSpaceLayer.cpp b/src/runtime/CL/functions/CLDepthToSpaceLayer.cpp
index 8dbd974ceb..bd2303c410 100644
--- a/src/runtime/CL/functions/CLDepthToSpaceLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthToSpaceLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLDepthToSpaceLayer.h"
#include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLDepthToSpaceLayer::configure(const ICLTensor *input, ICLTensor *output, i
void CLDepthToSpaceLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, int32_t block_shape)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDepthToSpaceLayerKernel>();
+ auto k = std::make_unique<CLDepthToSpaceLayerKernel>();
k->configure(compile_context, input, output, block_shape);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 2440384e3b..8d2c81bc15 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -37,7 +37,6 @@
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -125,7 +124,7 @@ Status validate_arguments_3x3(const ITensorInfo *input, const ITensorInfo *weigh
CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::CLDepthwiseConvolutionLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _dwc_native_kernel(support::cpp14::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
+ _dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
_permute_input_to_nhwc(),
_permute_weights_to_nhwc(),
_permute_output_to_nchw(),
@@ -351,11 +350,11 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::CLDepthwiseConvolutionLayerInternal3x3(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
_kernel(nullptr),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>()),
_permute_input_to_nchw(),
_permute_weights_to_nchw(),
_permute_output_to_nhwc(),
- _reshape_weights(support::cpp14::make_unique<CLDepthwiseConvolutionLayerReshapeWeightsKernel>()),
+ _reshape_weights(std::make_unique<CLDepthwiseConvolutionLayerReshapeWeightsKernel>()),
_permuted_input(),
_permuted_weights(),
_permuted_output(),
@@ -436,7 +435,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::config
weights_to_use = &_permuted_weights;
output_to_use = &_permuted_output;
- _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
+ _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
}
else if(is_nhwc)
{
@@ -445,11 +444,11 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::config
_reshape_weights->configure(compile_context, weights, &_permuted_weights, info);
weights_to_use = &_permuted_weights;
}
- _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
+ _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
}
else
{
- _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
+ _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
}
CLTensor *output_multipliers_to_use = nullptr;
diff --git a/src/runtime/CL/functions/CLDequantizationLayer.cpp b/src/runtime/CL/functions/CLDequantizationLayer.cpp
index 6d63463906..d358813724 100644
--- a/src/runtime/CL/functions/CLDequantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLDequantizationLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"
#include "src/core/CL/kernels/CLDequantizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -35,7 +34,7 @@ void CLDequantizationLayer::configure(const ICLTensor *input, ICLTensor *output)
void CLDequantizationLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDequantizationLayerKernel>();
+ auto k = std::make_unique<CLDequantizationLayerKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLDerivative.cpp b/src/runtime/CL/functions/CLDerivative.cpp
index a2b883ad28..2e3ecf7700 100644
--- a/src/runtime/CL/functions/CLDerivative.cpp
+++ b/src/runtime/CL/functions/CLDerivative.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLDerivativeKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLDerivative::configure(ICLTensor *input, ICLTensor *output_x, ICLTensor *o
void CLDerivative::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDerivativeKernel>();
+ auto k = std::make_unique<CLDerivativeKernel>();
k->configure(compile_context, input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, BorderSize(1), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLDilate.cpp b/src/runtime/CL/functions/CLDilate.cpp
index c3d5f8845f..92c5cc7ab1 100644
--- a/src/runtime/CL/functions/CLDilate.cpp
+++ b/src/runtime/CL/functions/CLDilate.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLDilateKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLDilate::configure(ICLTensor *input, ICLTensor *output, BorderMode border_
void CLDilate::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLDilateKernel>();
+ auto k = std::make_unique<CLDilateKernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, BorderSize(1), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index bff882c28b..49e97693e4 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -30,12 +30,11 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLDirectConvolutionLayer::CLDirectConvolutionLayer()
- : _direct_conv_kernel(support::cpp14::make_unique<CLDirectConvolutionLayerKernel>()), _input_border_handler(support::cpp14::make_unique<CLFillBorderKernel>()), _activationlayer_function(),
+ : _direct_conv_kernel(std::make_unique<CLDirectConvolutionLayerKernel>()), _input_border_handler(std::make_unique<CLFillBorderKernel>()), _activationlayer_function(),
_is_activationlayer_enabled(false)
{
}
diff --git a/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp b/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
index 35ed97d381..0ded640f51 100644
--- a/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
+++ b/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h"
#include "src/core/CL/kernels/CLElementWiseUnaryLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace experimental
{
void CLRsqrt::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::RSQRT);
_kernel = std::move(k);
}
@@ -46,7 +45,7 @@ Status CLRsqrt::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLExp::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::EXP);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ Status CLExp::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLNeg::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::NEG);
_kernel = std::move(k);
}
@@ -70,7 +69,7 @@ Status CLNeg::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLSin::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::SIN);
_kernel = std::move(k);
}
@@ -82,7 +81,7 @@ Status CLSin::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLAbs::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::ABS);
_kernel = std::move(k);
}
@@ -94,7 +93,7 @@ Status CLAbs::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLLog::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::LOG);
_kernel = std::move(k);
}
@@ -106,7 +105,7 @@ Status CLLog::validate(const ITensorInfo *input, const ITensorInfo *output)
void CLRound::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::ROUND);
_kernel = std::move(k);
}
@@ -125,7 +124,7 @@ struct CLRsqrtLayer::Impl
};
CLRsqrtLayer::CLRsqrtLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -142,7 +141,7 @@ void CLRsqrtLayer::configure(const CLCompileContext &compile_context, const ICLT
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRsqrt>();
+ _impl->op = std::make_unique<experimental::CLRsqrt>();
_impl->op->configure(compile_context, input->info(), output->info());
}
@@ -167,7 +166,7 @@ struct CLExpLayer::Impl
};
CLExpLayer::CLExpLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -184,7 +183,7 @@ void CLExpLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLExp>();
+ _impl->op = std::make_unique<experimental::CLExp>();
_impl->op->configure(compile_context, input->info(), output->info());
}
@@ -209,7 +208,7 @@ struct CLNegLayer::Impl
};
CLNegLayer::CLNegLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -226,7 +225,7 @@ void CLNegLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLNeg>();
+ _impl->op = std::make_unique<experimental::CLNeg>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLNegLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
@@ -250,7 +249,7 @@ struct CLSinLayer::Impl
};
CLSinLayer::CLSinLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -267,7 +266,7 @@ void CLSinLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLSin>();
+ _impl->op = std::make_unique<experimental::CLSin>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLSinLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
@@ -291,7 +290,7 @@ struct CLAbsLayer::Impl
};
CLAbsLayer::CLAbsLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -308,7 +307,7 @@ void CLAbsLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLAbs>();
+ _impl->op = std::make_unique<experimental::CLAbs>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLAbsLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
@@ -332,7 +331,7 @@ struct CLLogLayer::Impl
};
CLLogLayer::CLLogLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -349,7 +348,7 @@ void CLLogLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLog>();
+ _impl->op = std::make_unique<experimental::CLLog>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLLogLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
@@ -373,7 +372,7 @@ struct CLRoundLayer::Impl
};
CLRoundLayer::CLRoundLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -390,7 +389,7 @@ void CLRoundLayer::configure(const CLCompileContext &compile_context, const ICLT
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRound>();
+ _impl->op = std::make_unique<experimental::CLRound>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLRoundLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
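The element-wise unary functions above all reuse one kernel class and differ only in the ElementWiseUnary value passed at configure time. A self-contained sketch of that parameterisation follows, with a made-up enum and kernel standing in for the real ones:

// Sketch: one kernel parameterised by an operation enum, as in the hunks above.
// Op and UnaryKernel are placeholders, not library types.
#include <cmath>

enum class Op { Rsqrt, Exp, Neg };

struct UnaryKernel
{
    void configure(Op op) { _op = op; }
    float apply(float x) const
    {
        switch(_op)
        {
            case Op::Rsqrt: return 1.0f / std::sqrt(x);
            case Op::Exp:   return std::exp(x);
            case Op::Neg:   return -x;
        }
        return x;
    }
    Op _op{ Op::Rsqrt };
};

// Usage: UnaryKernel k; k.configure(Op::Exp); float y = k.apply(1.0f);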
diff --git a/src/runtime/CL/functions/CLElementwiseOperations.cpp b/src/runtime/CL/functions/CLElementwiseOperations.cpp
index 736cf973a1..a72e957fe6 100644
--- a/src/runtime/CL/functions/CLElementwiseOperations.cpp
+++ b/src/runtime/CL/functions/CLElementwiseOperations.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLElementwiseOperationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -40,7 +39,7 @@ CLArithmeticAddition::CLArithmeticAddition()
void CLArithmeticAddition::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>();
+ auto k = std::make_unique<CLSaturatedArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::ADD, input1, input2, output, policy, act_info);
_kernel = std::move(k);
}
@@ -61,7 +60,7 @@ CLArithmeticSubtraction::CLArithmeticSubtraction()
void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>();
+ auto k = std::make_unique<CLSaturatedArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
_kernel = std::move(k);
}
@@ -83,7 +82,7 @@ CLArithmeticDivision::CLArithmeticDivision()
void CLArithmeticDivision::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::DIV, input1, input2, output, act_info);
_kernel = std::move(k);
}
@@ -104,7 +103,7 @@ CLElementwiseMax::CLElementwiseMax()
void CLElementwiseMax::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::MAX, input1, input2, output, act_info);
_kernel = std::move(k);
}
@@ -125,7 +124,7 @@ CLElementwiseMin::CLElementwiseMin()
void CLElementwiseMin::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::MIN, input1, input2, output, act_info);
_kernel = std::move(k);
}
@@ -146,7 +145,7 @@ CLElementwiseSquaredDiff::CLElementwiseSquaredDiff()
void CLElementwiseSquaredDiff::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::SQUARED_DIFF, input1, input2, output, act_info);
_kernel = std::move(k);
}
@@ -167,7 +166,7 @@ CLElementwisePower::CLElementwisePower()
void CLElementwisePower::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::POWER, input1, input2, output, act_info);
_kernel = std::move(k);
}
@@ -192,7 +191,7 @@ struct CLArithmeticAddition::Impl
};
CLArithmeticAddition::CLArithmeticAddition()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLArithmeticAddition::CLArithmeticAddition(CLArithmeticAddition &&) = default;
@@ -210,7 +209,7 @@ void CLArithmeticAddition::configure(const CLCompileContext &compile_context, co
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticAddition>();
+ _impl->op = std::make_unique<experimental::CLArithmeticAddition>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info);
}
@@ -238,7 +237,7 @@ struct CLArithmeticSubtraction::Impl
};
CLArithmeticSubtraction::CLArithmeticSubtraction()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLArithmeticSubtraction::CLArithmeticSubtraction(CLArithmeticSubtraction &&) = default;
@@ -256,7 +255,7 @@ void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context,
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticSubtraction>();
+ _impl->op = std::make_unique<experimental::CLArithmeticSubtraction>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info);
}
@@ -284,7 +283,7 @@ struct CLArithmeticDivision::Impl
};
CLArithmeticDivision::CLArithmeticDivision()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLArithmeticDivision::CLArithmeticDivision(CLArithmeticDivision &&) = default;
@@ -301,7 +300,7 @@ void CLArithmeticDivision::configure(const CLCompileContext &compile_context, co
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticDivision>();
+ _impl->op = std::make_unique<experimental::CLArithmeticDivision>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
@@ -329,7 +328,7 @@ struct CLElementwiseMax::Impl
};
CLElementwiseMax::CLElementwiseMax()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLElementwiseMax::CLElementwiseMax(CLElementwiseMax &&) = default;
@@ -346,7 +345,7 @@ void CLElementwiseMax::configure(const CLCompileContext &compile_context, ICLTen
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseMax>();
+ _impl->op = std::make_unique<experimental::CLElementwiseMax>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
@@ -374,7 +373,7 @@ struct CLElementwiseMin::Impl
};
CLElementwiseMin::CLElementwiseMin()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLElementwiseMin::CLElementwiseMin(CLElementwiseMin &&) = default;
@@ -391,7 +390,7 @@ void CLElementwiseMin::configure(const CLCompileContext &compile_context, ICLTen
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseMin>();
+ _impl->op = std::make_unique<experimental::CLElementwiseMin>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
@@ -419,7 +418,7 @@ struct CLElementwiseSquaredDiff::Impl
};
CLElementwiseSquaredDiff::CLElementwiseSquaredDiff()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLElementwiseSquaredDiff::CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&) = default;
@@ -436,7 +435,7 @@ void CLElementwiseSquaredDiff::configure(const CLCompileContext &compile_context
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseSquaredDiff>();
+ _impl->op = std::make_unique<experimental::CLElementwiseSquaredDiff>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
@@ -464,7 +463,7 @@ struct CLElementwisePower::Impl
};
CLElementwisePower::CLElementwisePower()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLElementwisePower::CLElementwisePower(CLElementwisePower &&) = default;
@@ -481,7 +480,7 @@ void CLElementwisePower::configure(const CLCompileContext &compile_context, ICLT
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwisePower>();
+ _impl->op = std::make_unique<experimental::CLElementwisePower>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
diff --git a/src/runtime/CL/functions/CLEqualizeHistogram.cpp b/src/runtime/CL/functions/CLEqualizeHistogram.cpp
index cc927a055b..11607cf71d 100644
--- a/src/runtime/CL/functions/CLEqualizeHistogram.cpp
+++ b/src/runtime/CL/functions/CLEqualizeHistogram.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLHistogramKernel.h"
#include "src/core/CL/kernels/CLTableLookupKernel.h"
-#include "support/MemorySupport.h"
#include <algorithm>
#include <cmath>
@@ -86,9 +85,9 @@ void calculate_cum_dist_and_lut(CLDistribution1D &dist, CLDistribution1D &cum_di
} // namespace
CLEqualizeHistogram::CLEqualizeHistogram()
- : _histogram_kernel(support::cpp14::make_unique<CLHistogramKernel>()),
- _border_histogram_kernel(support::cpp14::make_unique<CLHistogramBorderKernel>()),
- _map_histogram_kernel(support::cpp14::make_unique<CLTableLookupKernel>()),
+ : _histogram_kernel(std::make_unique<CLHistogramKernel>()),
+ _border_histogram_kernel(std::make_unique<CLHistogramBorderKernel>()),
+ _map_histogram_kernel(std::make_unique<CLTableLookupKernel>()),
_hist(nr_bins, 0, max_range),
_cum_dist(nr_bins, 0, max_range),
_cd_lut(nr_bins, DataType::U8)
diff --git a/src/runtime/CL/functions/CLErode.cpp b/src/runtime/CL/functions/CLErode.cpp
index 6880c4845a..29551fc6bd 100644
--- a/src/runtime/CL/functions/CLErode.cpp
+++ b/src/runtime/CL/functions/CLErode.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLErodeKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLErode::configure(ICLTensor *input, ICLTensor *output, BorderMode border_m
void CLErode::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLErodeKernel>();
+ auto k = std::make_unique<CLErodeKernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, BorderSize(1), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp
index a0078689ff..c434b4e570 100644
--- a/src/runtime/CL/functions/CLFFT1D.cpp
+++ b/src/runtime/CL/functions/CLFFT1D.cpp
@@ -30,15 +30,14 @@
#include "src/core/CL/kernels/CLFFTRadixStageKernel.h"
#include "src/core/CL/kernels/CLFFTScaleKernel.h"
#include "src/core/utils/helpers/fft.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLFFT1D::CLFFT1D(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _digit_reverse_kernel(support::cpp14::make_unique<CLFFTDigitReverseKernel>()),
+ _digit_reverse_kernel(std::make_unique<CLFFTDigitReverseKernel>()),
_fft_kernels(),
- _scale_kernel(support::cpp14::make_unique<CLFFTScaleKernel>()),
+ _scale_kernel(std::make_unique<CLFFTScaleKernel>()),
_digit_reversed_input(),
_digit_reverse_indices(),
_num_ffts(0),
@@ -90,7 +89,7 @@ void CLFFT1D::configure(const CLCompileContext &compile_context, const ICLTensor
fft_kernel_info.radix = radix_for_stage;
fft_kernel_info.Nx = Nx;
fft_kernel_info.is_first_stage = (i == 0);
- _fft_kernels.emplace_back(support::cpp14::make_unique<CLFFTRadixStageKernel>());
+ _fft_kernels.emplace_back(std::make_unique<CLFFTRadixStageKernel>());
_fft_kernels.back()->configure(compile_context, &_digit_reversed_input, ((i == (_num_ffts - 1)) && !is_c2r) ? output : nullptr, fft_kernel_info);
Nx *= radix_for_stage;
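
Several files (CLFFT1D above, and CLGaussianPyramid, CLHOGMultiDetection and CLOpticalFlow further down) apply the same substitution inside emplace_back calls that grow a vector of owning kernel pointers, one per configured stage. A minimal sketch of that pattern, assuming a hypothetical StageKernel type rather than the library's kernels:

#include <memory>
#include <vector>

struct StageKernel // hypothetical stand-in for e.g. a per-stage CL kernel
{
    void configure(unsigned int radix) { _radix = radix; }
    unsigned int _radix{ 0 };
};

int main()
{
    std::vector<std::unique_ptr<StageKernel>> kernels;
    for(unsigned int radix : { 2u, 4u, 8u })
    {
        // One owning pointer per stage; std::make_unique replaces the
        // support::cpp14 helper now that C++14 is the default standard.
        kernels.emplace_back(std::make_unique<StageKernel>());
        kernels.back()->configure(radix);
    }
    return static_cast<int>(kernels.size());
}
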
diff --git a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
index 5472e8469f..97b64b24f3 100644
--- a/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLFFTConvolutionLayer.cpp
@@ -39,8 +39,6 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/helpers/fft.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
namespace
@@ -168,7 +166,7 @@ void CLFFTConvolutionLayer::configure(const CLCompileContext &compile_context, I
_pad_weights_func.configure(compile_context, &_flipped_weights, &_padded_weights, padding_w);
// Transform weights
- _transform_weights_func = support::cpp14::make_unique<CLFFT2D>();
+ _transform_weights_func = std::make_unique<CLFFT2D>();
_transform_weights_func->configure(compile_context, &_padded_weights, &_transformed_weights, FFT2DInfo());
// Pad input
diff --git a/src/runtime/CL/functions/CLFastCorners.cpp b/src/runtime/CL/functions/CLFastCorners.cpp
index 110d2c3639..a3a62d6d5e 100644
--- a/src/runtime/CL/functions/CLFastCorners.cpp
+++ b/src/runtime/CL/functions/CLFastCorners.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/ITensorAllocator.h"
#include "src/core/CL/kernels/CLFastCornersKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <algorithm>
#include <cstring>
@@ -40,9 +39,9 @@ using namespace arm_compute;
CLFastCorners::CLFastCorners(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _fast_corners_kernel(support::cpp14::make_unique<CLFastCornersKernel>()),
+ _fast_corners_kernel(std::make_unique<CLFastCornersKernel>()),
_suppr_func(),
- _copy_array_kernel(support::cpp14::make_unique<CLCopyToArrayKernel>()),
+ _copy_array_kernel(std::make_unique<CLCopyToArrayKernel>()),
_output(),
_suppr(),
_win(),
diff --git a/src/runtime/CL/functions/CLFill.cpp b/src/runtime/CL/functions/CLFill.cpp
index 855ed8380a..30843d8cc0 100644
--- a/src/runtime/CL/functions/CLFill.cpp
+++ b/src/runtime/CL/functions/CLFill.cpp
@@ -26,8 +26,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLMemsetKernel.h"
-#include "support/MemorySupport.h"
-
#include <utility>
namespace arm_compute
@@ -39,7 +37,7 @@ void CLFill::configure(ICLTensor *tensor, PixelValue constant_value)
void CLFill::configure(const CLCompileContext &compile_context, ICLTensor *tensor, PixelValue constant_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLMemsetKernel>();
+ auto k = std::make_unique<CLMemsetKernel>();
k->configure(compile_context, tensor, constant_value);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLFillBorder.cpp b/src/runtime/CL/functions/CLFillBorder.cpp
index 27d132b842..2e5a29ece1 100644
--- a/src/runtime/CL/functions/CLFillBorder.cpp
+++ b/src/runtime/CL/functions/CLFillBorder.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLFillBorder.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLFillBorder::configure(ICLTensor *tensor, unsigned int border_width, Borde
void CLFillBorder::configure(const CLCompileContext &compile_context, ICLTensor *tensor, unsigned int border_width, BorderMode border_mode, const PixelValue &constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLFillBorderKernel>();
+ auto k = std::make_unique<CLFillBorderKernel>();
k->configure(compile_context, tensor, BorderSize(border_width), border_mode, constant_border_value);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLFlattenLayer.cpp b/src/runtime/CL/functions/CLFlattenLayer.cpp
index 0646a0d3a0..c10e91bf96 100644
--- a/src/runtime/CL/functions/CLFlattenLayer.cpp
+++ b/src/runtime/CL/functions/CLFlattenLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFlattenLayerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -36,7 +35,7 @@ void CLFlattenLayer::configure(const ICLTensor *input, ICLTensor *output)
void CLFlattenLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLFlattenLayerKernel>();
+ auto k = std::make_unique<CLFlattenLayerKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
CLScheduler::get().tune_kernel_static(*_kernel);
diff --git a/src/runtime/CL/functions/CLFloor.cpp b/src/runtime/CL/functions/CLFloor.cpp
index 770e6a3781..5549d09b24 100644
--- a/src/runtime/CL/functions/CLFloor.cpp
+++ b/src/runtime/CL/functions/CLFloor.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLFloor.h"
#include "src/core/CL/kernels/CLFloorKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -35,7 +34,7 @@ void CLFloor::configure(const ICLTensor *input, ICLTensor *output)
void CLFloor::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLFloorKernel>();
+ auto k = std::make_unique<CLFloorKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 1acf3c7a8b..46a90a54b7 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -42,7 +42,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLTransposeKernel.h"
#include "support/Cast.h"
-#include "support/MemorySupport.h"
#include <algorithm>
@@ -149,7 +148,7 @@ void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLT
void CLFullyConnectedLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
+ auto k = std::make_unique<CLTransposeKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLFuseBatchNormalization.cpp b/src/runtime/CL/functions/CLFuseBatchNormalization.cpp
index f018e5a8ae..2945508012 100644
--- a/src/runtime/CL/functions/CLFuseBatchNormalization.cpp
+++ b/src/runtime/CL/functions/CLFuseBatchNormalization.cpp
@@ -29,12 +29,11 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFuseBatchNormalizationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLFuseBatchNormalization::CLFuseBatchNormalization()
- : _fuse_bn_kernel(support::cpp14::make_unique<CLFuseBatchNormalizationKernel>())
+ : _fuse_bn_kernel(std::make_unique<CLFuseBatchNormalizationKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 57a5f9739e..181ae2843b 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -49,8 +49,6 @@
#include "src/runtime/CL/gemm/CLGEMMKernelSelection.h"
#include "support/Cast.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
@@ -60,7 +58,7 @@ using namespace arm_compute::utils::cast;
namespace weights_transformations
{
CLGEMMReshapeRHSMatrixKernelManaged::CLGEMMReshapeRHSMatrixKernelManaged()
- : _kernel(support::cpp14::make_unique<CLGEMMReshapeRHSMatrixKernel>())
+ : _kernel(std::make_unique<CLGEMMReshapeRHSMatrixKernel>())
{
}
@@ -102,13 +100,13 @@ void CLGEMMReshapeRHSMatrixKernelManaged::configure(const CLCompileContext &comp
CLGEMM::CLGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
: _memory_group(std::move(memory_manager)),
_weights_manager(weights_manager),
- _mm_kernel(support::cpp14::make_unique<CLGEMMMatrixMultiplyKernel>()),
- _reshape_lhs_kernel(support::cpp14::make_unique<CLGEMMReshapeLHSMatrixKernel>()),
- _reshape_rhs_kernel(support::cpp14::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
- _reshape_rhs_kernel_managed(support::cpp14::make_unique<weights_transformations::CLGEMMReshapeRHSMatrixKernelManaged>()),
- _mm_reshaped_kernel(support::cpp14::make_unique<CLGEMMMatrixMultiplyReshapedKernel>()),
- _mm_reshaped_only_rhs_kernel(support::cpp14::make_unique<CLGEMMMatrixMultiplyReshapedOnlyRHSKernel>()),
- _mm_reshaped_only_rhs_fallback_kernel(support::cpp14::make_unique<CLGEMMMatrixMultiplyReshapedOnlyRHSKernel>()),
+ _mm_kernel(std::make_unique<CLGEMMMatrixMultiplyKernel>()),
+ _reshape_lhs_kernel(std::make_unique<CLGEMMReshapeLHSMatrixKernel>()),
+ _reshape_rhs_kernel(std::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
+ _reshape_rhs_kernel_managed(std::make_unique<weights_transformations::CLGEMMReshapeRHSMatrixKernelManaged>()),
+ _mm_reshaped_kernel(std::make_unique<CLGEMMMatrixMultiplyReshapedKernel>()),
+ _mm_reshaped_only_rhs_kernel(std::make_unique<CLGEMMMatrixMultiplyReshapedOnlyRHSKernel>()),
+ _mm_reshaped_only_rhs_fallback_kernel(std::make_unique<CLGEMMMatrixMultiplyReshapedOnlyRHSKernel>()),
_tmp_a(),
_tmp_b(),
_original_b(nullptr),
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 4d26df5e43..f37f06b0ff 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -46,7 +46,6 @@
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "support/Cast.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <memory>
@@ -58,7 +57,7 @@ using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;
CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
- : _weights_reshape_kernel(support::cpp14::make_unique<CLWeightsReshapeKernel>())
+ : _weights_reshape_kernel(std::make_unique<CLWeightsReshapeKernel>())
{
}
@@ -117,9 +116,9 @@ void CLConvolutionLayerReshapeWeights::run()
}
CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
- : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(support::cpp14::make_unique<CLIm2ColKernel>()),
- _mm_gemm(memory_manager, weights_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(support::cpp14::make_unique<CLCol2ImKernel>()), _activationlayer_function(), _original_weights(nullptr),
- _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
+ : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(std::make_unique<CLIm2ColKernel>()), _mm_gemm(memory_manager,
+ weights_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(std::make_unique<CLCol2ImKernel>()), _activationlayer_function(), _original_weights(nullptr), _im2col_output(), _weights_reshaped(),
+ _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
index 4d277f0982..a040e9d38e 100644
--- a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -43,7 +43,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "support/MemorySupport.h"
#include <tuple>
@@ -114,7 +113,7 @@ CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManage
_permute_weights_to_nhwc(),
_reshape_weights(),
_transpose_weights(),
- _deconv_reshape(support::cpp14::make_unique<CLDeconvolutionReshapeOutputKernel>()),
+ _deconv_reshape(std::make_unique<CLDeconvolutionReshapeOutputKernel>()),
_slice_gemm(),
_gemmlowp_final(),
_reshaped_weights(),
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index d3d80a39e3..4bf5bde61e 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -44,7 +44,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/CL/gemm/CLGEMMKernelSelection.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -79,14 +78,14 @@ inline bool is_gemm_reshaped(unsigned int m, unsigned int n, unsigned int k, Dat
CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _weights_to_qasymm8(support::cpp14::make_unique<CLDepthConvertLayerKernel>()),
- _mm_native_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
- _mm_reshaped_only_rhs_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
- _mtx_b_reshape_kernel(support::cpp14::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
- _mtx_a_reduction_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _mtx_b_reduction_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixBReductionKernel>()),
- _offset_contribution_kernel(support::cpp14::make_unique<CLGEMMLowpOffsetContributionKernel>()),
- _offset_contribution_output_stage_kernel(support::cpp14::make_unique<CLGEMMLowpOffsetContributionOutputStageKernel>()),
+ _weights_to_qasymm8(std::make_unique<CLDepthConvertLayerKernel>()),
+ _mm_native_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
+ _mm_reshaped_only_rhs_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
+ _mtx_b_reshape_kernel(std::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
+ _mtx_a_reduction_kernel(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _mtx_b_reduction_kernel(std::make_unique<CLGEMMLowpMatrixBReductionKernel>()),
+ _offset_contribution_kernel(std::make_unique<CLGEMMLowpOffsetContributionKernel>()),
+ _offset_contribution_output_stage_kernel(std::make_unique<CLGEMMLowpOffsetContributionOutputStageKernel>()),
_qasymm8_weights(),
_vector_sum_col(),
_vector_sum_row(),
diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index f9c5247d2d..be452aaf3d 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -28,7 +28,6 @@
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
-#include "support/MemorySupport.h"
#include <algorithm>
@@ -52,7 +51,7 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const CLComp
info.gemmlowp_min_bound = min;
info.gemmlowp_max_bound = max;
info.output_data_type = DataType::QASYMM8;
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
}
@@ -85,7 +84,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::configure(const CLCompi
info.gemmlowp_min_bound = min;
info.gemmlowp_max_bound = max;
info.output_data_type = DataType::QASYMM8_SIGNED;
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
}
@@ -117,7 +116,7 @@ void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const CLComp
info.gemmlowp_min_bound = min;
info.gemmlowp_max_bound = max;
info.output_data_type = DataType::QSYMM16;
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
}
@@ -145,21 +144,21 @@ void CLGEMMLowpOutputStage::configure(const CLCompileContext &compile_context, c
{
case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
{
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
break;
}
case GEMMLowpOutputStageType::QUANTIZE_DOWN:
{
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
break;
}
case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
{
- auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel>();
+ auto k = std::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel>();
k->configure(compile_context, input, bias, output, &info);
_kernel = std::move(k);
break;
diff --git a/src/runtime/CL/functions/CLGather.cpp b/src/runtime/CL/functions/CLGather.cpp
index de6296f6a3..bde34dc4db 100644
--- a/src/runtime/CL/functions/CLGather.cpp
+++ b/src/runtime/CL/functions/CLGather.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/kernels/CLGatherKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLGather::configure(const ICLTensor *input, const ICLTensor *indices, ICLTe
void CLGather::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis)
{
- auto k = arm_compute::support::cpp14::make_unique<CLGatherKernel>();
+ auto k = std::make_unique<CLGatherKernel>();
k->configure(compile_context, input, indices, output, axis);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLGaussian3x3.cpp b/src/runtime/CL/functions/CLGaussian3x3.cpp
index 97db9ba06d..8eeade2f47 100644
--- a/src/runtime/CL/functions/CLGaussian3x3.cpp
+++ b/src/runtime/CL/functions/CLGaussian3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGaussian3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLGaussian3x3::configure(ICLTensor *input, ICLTensor *output, BorderMode bo
void CLGaussian3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLGaussian3x3Kernel>();
+ auto k = std::make_unique<CLGaussian3x3Kernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLGaussian5x5.cpp b/src/runtime/CL/functions/CLGaussian5x5.cpp
index f7470d4ecf..ee72fcbe11 100644
--- a/src/runtime/CL/functions/CLGaussian5x5.cpp
+++ b/src/runtime/CL/functions/CLGaussian5x5.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/ITensorAllocator.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGaussian5x5Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,9 +38,9 @@ using namespace arm_compute;
CLGaussian5x5::CLGaussian5x5(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _kernel_hor(support::cpp14::make_unique<CLGaussian5x5HorKernel>()),
- _kernel_vert(support::cpp14::make_unique<CLGaussian5x5VertKernel>()),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _kernel_hor(std::make_unique<CLGaussian5x5HorKernel>()),
+ _kernel_vert(std::make_unique<CLGaussian5x5VertKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>()),
_tmp()
{
}
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index 66b85352c1..9fe35f6f0e 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -38,7 +38,6 @@
#include "src/core/CL/kernels/CLGaussian5x5Kernel.h"
#include "src/core/CL/kernels/CLGaussianPyramidKernel.h"
#include "src/core/CL/kernels/CLScaleKernel.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -101,19 +100,19 @@ void CLGaussianPyramidHalf::configure(const CLCompileContext &compile_context, I
for(size_t i = 0; i < num_levels - 1; ++i)
{
/* Configure horizontal kernel */
- _horizontal_reduction.emplace_back(support::cpp14::make_unique<CLGaussianPyramidHorKernel>());
+ _horizontal_reduction.emplace_back(std::make_unique<CLGaussianPyramidHorKernel>());
_horizontal_reduction.back()->configure(compile_context, _pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
/* Configure vertical kernel */
- _vertical_reduction.emplace_back(support::cpp14::make_unique<CLGaussianPyramidVertKernel>());
+ _vertical_reduction.emplace_back(std::make_unique<CLGaussianPyramidVertKernel>());
_vertical_reduction.back()->configure(compile_context, _tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
/* Configure border */
- _horizontal_border_handler.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
+ _horizontal_border_handler.emplace_back(std::make_unique<CLFillBorderKernel>());
_horizontal_border_handler.back()->configure(compile_context, _pyramid->get_pyramid_level(i), _horizontal_reduction.back()->border_size(), border_mode, PixelValue(constant_border_value));
/* Configure border */
- _vertical_border_handler.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
+ _vertical_border_handler.emplace_back(std::make_unique<CLFillBorderKernel>());
_vertical_border_handler.back()->configure(compile_context, _tmp.get_pyramid_level(i), _vertical_reduction.back()->border_size(), border_mode, PixelValue(pixel_value_u16));
}
_tmp.allocate();
@@ -185,7 +184,7 @@ void CLGaussianPyramidOrb::configure(const CLCompileContext &compile_context, IC
_gauss5x5[i].configure(compile_context, _pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode, constant_border_value);
/* Configure scale image kernel */
- _scale_nearest.emplace_back(support::cpp14::make_unique<CLScaleKernel>());
+ _scale_nearest.emplace_back(std::make_unique<CLScaleKernel>());
_scale_nearest.back()->configure(compile_context, _tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), ScaleKernelInfo{ InterpolationPolicy::NEAREST_NEIGHBOR, border_mode, PixelValue(), SamplingPolicy::CENTER });
}
diff --git a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
index 87bf39030a..e536816f97 100644
--- a/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
+++ b/src/runtime/CL/functions/CLGenerateProposalsLayer.cpp
@@ -32,22 +32,21 @@
#include "src/core/CL/kernels/CLPermuteKernel.h"
#include "src/core/CL/kernels/CLQuantizationLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLGenerateProposalsLayer::CLGenerateProposalsLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(memory_manager),
- _permute_deltas_kernel(support::cpp14::make_unique<CLPermuteKernel>()),
+ _permute_deltas_kernel(std::make_unique<CLPermuteKernel>()),
_flatten_deltas(),
- _permute_scores_kernel(support::cpp14::make_unique<CLPermuteKernel>()),
+ _permute_scores_kernel(std::make_unique<CLPermuteKernel>()),
_flatten_scores(),
- _compute_anchors_kernel(support::cpp14::make_unique<CLComputeAllAnchorsKernel>()),
- _bounding_box_kernel(support::cpp14::make_unique<CLBoundingBoxTransformKernel>()),
- _pad_kernel(support::cpp14::make_unique<CLPadLayerKernel>()),
- _dequantize_anchors(support::cpp14::make_unique<CLDequantizationLayerKernel>()),
- _dequantize_deltas(support::cpp14::make_unique<CLDequantizationLayerKernel>()),
- _quantize_all_proposals(support::cpp14::make_unique<CLQuantizationLayerKernel>()),
+ _compute_anchors_kernel(std::make_unique<CLComputeAllAnchorsKernel>()),
+ _bounding_box_kernel(std::make_unique<CLBoundingBoxTransformKernel>()),
+ _pad_kernel(std::make_unique<CLPadLayerKernel>()),
+ _dequantize_anchors(std::make_unique<CLDequantizationLayerKernel>()),
+ _dequantize_deltas(std::make_unique<CLDequantizationLayerKernel>()),
+ _quantize_all_proposals(std::make_unique<CLQuantizationLayerKernel>()),
_cpp_nms(memory_manager),
_is_nhwc(false),
_is_qasymm8(false),
diff --git a/src/runtime/CL/functions/CLHOGDescriptor.cpp b/src/runtime/CL/functions/CLHOGDescriptor.cpp
index 80026532ab..8d9ea17d66 100644
--- a/src/runtime/CL/functions/CLHOGDescriptor.cpp
+++ b/src/runtime/CL/functions/CLHOGDescriptor.cpp
@@ -31,15 +31,14 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLHOGDescriptorKernel.h"
#include "src/core/CL/kernels/CLMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLHOGDescriptor::CLHOGDescriptor(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
_gradient(),
- _orient_bin(support::cpp14::make_unique<CLHOGOrientationBinningKernel>()),
- _block_norm(support::cpp14::make_unique<CLHOGBlockNormalizationKernel>()),
+ _orient_bin(std::make_unique<CLHOGOrientationBinningKernel>()),
+ _block_norm(std::make_unique<CLHOGBlockNormalizationKernel>()),
_mag(),
_phase(),
_hog_space()
diff --git a/src/runtime/CL/functions/CLHOGDetector.cpp b/src/runtime/CL/functions/CLHOGDetector.cpp
index 07ae8151c0..365021c723 100644
--- a/src/runtime/CL/functions/CLHOGDetector.cpp
+++ b/src/runtime/CL/functions/CLHOGDetector.cpp
@@ -26,14 +26,13 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLHOGDetectorKernel.h"
-#include "support/MemorySupport.h"
#include <algorithm>
using namespace arm_compute;
CLHOGDetector::CLHOGDetector()
- : _hog_detector_kernel(support::cpp14::make_unique<CLHOGDetectorKernel>()), _detection_windows(nullptr), _num_detection_windows()
+ : _hog_detector_kernel(std::make_unique<CLHOGDetectorKernel>()), _detection_windows(nullptr), _num_detection_windows()
{
}
diff --git a/src/runtime/CL/functions/CLHOGGradient.cpp b/src/runtime/CL/functions/CLHOGGradient.cpp
index 5f3b9cf529..f3aa527417 100644
--- a/src/runtime/CL/functions/CLHOGGradient.cpp
+++ b/src/runtime/CL/functions/CLHOGGradient.cpp
@@ -28,14 +28,13 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLHOGGradient::CLHOGGradient(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
_derivative(),
- _mag_phase(support::cpp14::make_unique<CLMagnitudePhaseKernel>()),
+ _mag_phase(std::make_unique<CLMagnitudePhaseKernel>()),
_gx(),
_gy()
{
diff --git a/src/runtime/CL/functions/CLHOGMultiDetection.cpp b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
index dfc90537cf..2464e6cf9f 100644
--- a/src/runtime/CL/functions/CLHOGMultiDetection.cpp
+++ b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
@@ -34,7 +34,6 @@
#include "src/core/CL/kernels/CLHOGDescriptorKernel.h"
#include "src/core/CL/kernels/CLHOGDetectorKernel.h"
#include "src/core/CL/kernels/CLMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -188,7 +187,7 @@ void CLHOGMultiDetection::configure(const CLCompileContext &compile_context, ICL
_memory_group.manage(&_hog_space[i]);
// Initialise orientation binning kernel
- _orient_bin_kernel.emplace_back(support::cpp14::make_unique<CLHOGOrientationBinningKernel>());
+ _orient_bin_kernel.emplace_back(std::make_unique<CLHOGOrientationBinningKernel>());
_orient_bin_kernel.back()->configure(compile_context, &_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
}
@@ -210,7 +209,7 @@ void CLHOGMultiDetection::configure(const CLCompileContext &compile_context, ICL
_memory_group.manage(&_hog_norm_space[i]);
// Initialize block normalization kernel
- _block_norm_kernel.emplace_back(support::cpp14::make_unique<CLHOGBlockNormalizationKernel>());
+ _block_norm_kernel.emplace_back(std::make_unique<CLHOGBlockNormalizationKernel>());
_block_norm_kernel.back()->configure(compile_context, &_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
}
diff --git a/src/runtime/CL/functions/CLHarrisCorners.cpp b/src/runtime/CL/functions/CLHarrisCorners.cpp
index 9d8ebceb30..37f428c677 100644
--- a/src/runtime/CL/functions/CLHarrisCorners.cpp
+++ b/src/runtime/CL/functions/CLHarrisCorners.cpp
@@ -37,7 +37,6 @@
#include "src/core/CL/kernels/CLHarrisCornersKernel.h"
#include "src/core/CL/kernels/CLSobel5x5Kernel.h"
#include "src/core/CL/kernels/CLSobel7x7Kernel.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <utility>
@@ -47,12 +46,12 @@ using namespace arm_compute;
CLHarrisCorners::CLHarrisCorners(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
: _memory_group(std::move(memory_manager)),
_sobel(nullptr),
- _harris_score(support::cpp14::make_unique<CLHarrisScoreKernel>()),
+ _harris_score(std::make_unique<CLHarrisScoreKernel>()),
_non_max_suppr(),
_candidates(),
_sort_euclidean(),
- _border_gx(support::cpp14::make_unique<CLFillBorderKernel>()),
- _border_gy(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _border_gx(std::make_unique<CLFillBorderKernel>()),
+ _border_gy(std::make_unique<CLFillBorderKernel>()),
_gx(),
_gy(),
_score(),
@@ -106,21 +105,21 @@ void CLHarrisCorners::configure(const CLCompileContext &compile_context, ICLImag
{
case 3:
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel3x3>();
+ auto k = std::make_unique<CLSobel3x3>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
}
case 5:
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel5x5>();
+ auto k = std::make_unique<CLSobel5x5>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
}
case 7:
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel7x7>();
+ auto k = std::make_unique<CLSobel7x7>();
k->configure(compile_context, input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index bd680f448d..9bc060e6ca 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -41,7 +40,7 @@ void CLInstanceNormalizationLayer::configure(ICLTensor *input, ICLTensor *output
void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
- auto k = arm_compute::support::cpp14::make_unique<CLInstanceNormalizationLayerKernel>();
+ auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
k->configure(compile_context, input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLIntegralImage.cpp b/src/runtime/CL/functions/CLIntegralImage.cpp
index 41e47e77c7..56a151a085 100644
--- a/src/runtime/CL/functions/CLIntegralImage.cpp
+++ b/src/runtime/CL/functions/CLIntegralImage.cpp
@@ -25,13 +25,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLIntegralImageKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLIntegralImage::CLIntegralImage()
- : _integral_hor(support::cpp14::make_unique<CLIntegralImageHorKernel>()),
- _integral_vert(support::cpp14::make_unique<CLIntegralImageVertKernel>())
+ : _integral_hor(std::make_unique<CLIntegralImageHorKernel>()),
+ _integral_vert(std::make_unique<CLIntegralImageVertKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLL2NormalizeLayer.cpp b/src/runtime/CL/functions/CLL2NormalizeLayer.cpp
index 64aac269cd..8c360aaa9e 100644
--- a/src/runtime/CL/functions/CLL2NormalizeLayer.cpp
+++ b/src/runtime/CL/functions/CLL2NormalizeLayer.cpp
@@ -32,7 +32,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -44,7 +43,7 @@ constexpr int max_input_tensor_dim = 3;
CLL2NormalizeLayer::CLL2NormalizeLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
_reduce_func(),
- _normalize_kernel(support::cpp14::make_unique<CLL2NormalizeLayerKernel>()),
+ _normalize_kernel(std::make_unique<CLL2NormalizeLayerKernel>()),
_sumsq()
{
}
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index b095c06535..77df917119 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -44,7 +44,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/CL/kernels/CLTransposeKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -54,10 +53,10 @@ using namespace arm_compute::utils::info_helpers;
CLLSTMLayer::CLLSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _fully_connected_input_gate(), _accum_input_gate1(), _subtract_input_gate(), _pixelwise_mul_input_gate(), _activation_input_gate(),
_fully_connected_forget_gate(), _accum_forget_gate1(), _pixelwise_mul_forget_gate(), _activation_forget_gate(), _fully_connected_cell_state(), _gemm_cell_state1(),
- _transpose_cell_state(support::cpp14::make_unique<CLTransposeKernel>()), _accum_cell_state1(), _accum_cell_state2(), _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(),
+ _transpose_cell_state(std::make_unique<CLTransposeKernel>()), _accum_cell_state1(), _accum_cell_state2(), _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(),
_pixelwise_mul_cell_state2(), _fully_connected_output(), _pixelwise_mul_output_state1(), _accum_output1(), _activation_output(), _activation_output_state(), _pixelwise_mul_output_state2(),
- _fully_connected_output_state(), _projection_clip(), _copy_cell_state(support::cpp14::make_unique<CLCopyKernel>()), _copy_output(support::cpp14::make_unique<CLCopyKernel>()), _concat_scratch_buffer(),
- _concat_inputs_forget_gate(), _concat_weights_forget_gate(), _concat_weights_input_gate(), _concat_weights_output(), _ones_memset_kernel(support::cpp14::make_unique<CLMemsetKernel>()),
+ _fully_connected_output_state(), _projection_clip(), _copy_cell_state(std::make_unique<CLCopyKernel>()), _copy_output(std::make_unique<CLCopyKernel>()), _concat_scratch_buffer(),
+ _concat_inputs_forget_gate(), _concat_weights_forget_gate(), _concat_weights_input_gate(), _concat_weights_output(), _ones_memset_kernel(std::make_unique<CLMemsetKernel>()),
_mean_std_norm_input_gate(), _pixelwise_mul_input_gate_coeff(), _accum_input_gate_bias(), _mean_std_norm_forget_gate(), _pixelwise_mul_forget_gate_coeff(), _accum_forget_gate_bias(),
_mean_std_norm_cell_gate(), _pixelwise_mul_cell_gate_coeff(), _accum_cell_gate_bias(), _mean_std_norm_output_gate(), _pixelwise_mul_output_gate_coeff(), _accum_output_gate_bias(), _input_gate_out1(),
_input_gate_out2(), _input_gate_out3(), _input_gate_out4(), _forget_gate_out1(), _forget_gate_out2(), _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(), _forget_gate_out6(),
diff --git a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
index 04e59ac4a6..3adae07095 100644
--- a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
@@ -31,7 +31,6 @@
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <tuple>
@@ -84,10 +83,10 @@ void calculate_shapes(const ITensorInfo *input, const ITensorInfo *weights, cons
CLLocallyConnectedLayer::CLLocallyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _input_im2col_kernel(support::cpp14::make_unique<CLIm2ColKernel>()),
- _weights_reshape_kernel(support::cpp14::make_unique<CLWeightsReshapeKernel>()),
- _mm_kernel(support::cpp14::make_unique<CLLocallyConnectedMatrixMultiplyKernel>()),
- _output_col2im_kernel(support::cpp14::make_unique<CLCol2ImKernel>()),
+ _input_im2col_kernel(std::make_unique<CLIm2ColKernel>()),
+ _weights_reshape_kernel(std::make_unique<CLWeightsReshapeKernel>()),
+ _mm_kernel(std::make_unique<CLLocallyConnectedMatrixMultiplyKernel>()),
+ _output_col2im_kernel(std::make_unique<CLCol2ImKernel>()),
_input_im2col_reshaped(),
_weights_reshaped(),
_gemm_output(),
diff --git a/src/runtime/CL/functions/CLLogicalAnd.cpp b/src/runtime/CL/functions/CLLogicalAnd.cpp
index 55d3dc523b..f1c53651c7 100644
--- a/src/runtime/CL/functions/CLLogicalAnd.cpp
+++ b/src/runtime/CL/functions/CLLogicalAnd.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLLogicalAnd.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/kernels/CLElementwiseOperationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace experimental
{
void CLLogicalAnd::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLLogicalBinaryKernel>();
+ auto k = std::make_unique<CLLogicalBinaryKernel>();
k->configure(compile_context, kernels::LogicalOperation::And, input1, input2, output);
_kernel = std::move(k);
}
@@ -59,7 +58,7 @@ struct CLLogicalAnd::Impl
};
CLLogicalAnd::CLLogicalAnd()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLLogicalAnd::CLLogicalAnd(CLLogicalAnd &&) = default;
@@ -76,7 +75,7 @@ void CLLogicalAnd::configure(const CLCompileContext &compile_context, ICLTensor
_impl->src0 = input1;
_impl->src1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLogicalAnd>();
+ _impl->op = std::make_unique<experimental::CLLogicalAnd>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info());
}
diff --git a/src/runtime/CL/functions/CLLogicalNot.cpp b/src/runtime/CL/functions/CLLogicalNot.cpp
index 67aa3192f8..d3774da597 100644
--- a/src/runtime/CL/functions/CLLogicalNot.cpp
+++ b/src/runtime/CL/functions/CLLogicalNot.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLLogicalNot.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/kernels/CLElementWiseUnaryLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace experimental
{
void CLLogicalNot::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
+ auto k = std::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::LOGICAL_NOT);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ struct CLLogicalNot::Impl
};
CLLogicalNot::CLLogicalNot()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLLogicalNot::CLLogicalNot(CLLogicalNot &&) = default;
@@ -74,7 +73,7 @@ void CLLogicalNot::configure(const CLCompileContext &compile_context, const ICLT
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLogicalNot>();
+ _impl->op = std::make_unique<experimental::CLLogicalNot>();
_impl->op->configure(compile_context, input->info(), output->info());
}
diff --git a/src/runtime/CL/functions/CLLogicalOr.cpp b/src/runtime/CL/functions/CLLogicalOr.cpp
index b5be3cf816..8c6087ed7d 100644
--- a/src/runtime/CL/functions/CLLogicalOr.cpp
+++ b/src/runtime/CL/functions/CLLogicalOr.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLLogicalOr.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/kernels/CLElementwiseOperationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace experimental
{
void CLLogicalOr::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLLogicalBinaryKernel>();
+ auto k = std::make_unique<CLLogicalBinaryKernel>();
k->configure(compile_context, kernels::LogicalOperation::Or, input1, input2, output);
_kernel = std::move(k);
}
@@ -59,7 +58,7 @@ struct CLLogicalOr::Impl
};
CLLogicalOr::CLLogicalOr()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLLogicalOr::CLLogicalOr(CLLogicalOr &&) = default;
@@ -76,7 +75,7 @@ void CLLogicalOr::configure(const CLCompileContext &compile_context, ICLTensor *
_impl->src0 = input1;
_impl->src1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLogicalOr>();
+ _impl->op = std::make_unique<experimental::CLLogicalOr>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info());
}
diff --git a/src/runtime/CL/functions/CLMagnitude.cpp b/src/runtime/CL/functions/CLMagnitude.cpp
index fb3ebdaa96..0599a11fa1 100644
--- a/src/runtime/CL/functions/CLMagnitude.cpp
+++ b/src/runtime/CL/functions/CLMagnitude.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLMagnitude.h"
#include "src/core/CL/kernels/CLMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLMagnitude::configure(const ICLTensor *input1, const ICLTensor *input2, IC
void CLMagnitude::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, MagnitudeType mag_type)
{
- auto k = arm_compute::support::cpp14::make_unique<CLMagnitudePhaseKernel>();
+ auto k = std::make_unique<CLMagnitudePhaseKernel>();
k->configure(compile_context, input1, input2, output, nullptr, mag_type);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp b/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
index 392bff2b4e..c9deb301ef 100644
--- a/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLMaxUnpoolingLayer.cpp
@@ -29,13 +29,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLMaxUnpoolingLayerKernel.h"
#include "src/core/CL/kernels/CLMemsetKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLMaxUnpoolingLayer::CLMaxUnpoolingLayer()
- : _memset_kernel(support::cpp14::make_unique<CLMemsetKernel>()),
- _unpooling_layer_kernel(support::cpp14::make_unique<CLMaxUnpoolingLayerKernel>())
+ : _memset_kernel(std::make_unique<CLMemsetKernel>()),
+ _unpooling_layer_kernel(std::make_unique<CLMaxUnpoolingLayerKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLMeanStdDev.cpp b/src/runtime/CL/functions/CLMeanStdDev.cpp
index c91bc954b8..d8cd41d45f 100644
--- a/src/runtime/CL/functions/CLMeanStdDev.cpp
+++ b/src/runtime/CL/functions/CLMeanStdDev.cpp
@@ -28,7 +28,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLMeanStdDevKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -43,8 +42,8 @@ CLMeanStdDev::CLMeanStdDev(std::shared_ptr<IMemoryManager> memory_manager) // NO
_reduction_output_stddev(),
_mean(nullptr),
_stddev(nullptr),
- _mean_stddev_kernel(support::cpp14::make_unique<CLMeanStdDevKernel>()),
- _fill_border_kernel(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _mean_stddev_kernel(std::make_unique<CLMeanStdDevKernel>()),
+ _fill_border_kernel(std::make_unique<CLFillBorderKernel>()),
_global_sum(),
_global_sum_squared()
{
diff --git a/src/runtime/CL/functions/CLMeanStdDevNormalizationLayer.cpp b/src/runtime/CL/functions/CLMeanStdDevNormalizationLayer.cpp
index 5b5ff49ecb..0f6a0e47a4 100644
--- a/src/runtime/CL/functions/CLMeanStdDevNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLMeanStdDevNormalizationLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLMeanStdDevNormalizationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLMeanStdDevNormalizationLayer::configure(ICLTensor *input, ICLTensor *outp
void CLMeanStdDevNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float epsilon)
{
- auto k = arm_compute::support::cpp14::make_unique<CLMeanStdDevNormalizationKernel>();
+ auto k = std::make_unique<CLMeanStdDevNormalizationKernel>();
k->configure(compile_context, input, output, epsilon);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLMedian3x3.cpp b/src/runtime/CL/functions/CLMedian3x3.cpp
index 2040ebd4f5..b32063a8fe 100644
--- a/src/runtime/CL/functions/CLMedian3x3.cpp
+++ b/src/runtime/CL/functions/CLMedian3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLMedian3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLMedian3x3::configure(ICLTensor *input, ICLTensor *output, BorderMode bord
void CLMedian3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLMedian3x3Kernel>();
+ auto k = std::make_unique<CLMedian3x3Kernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLMinMaxLocation.cpp b/src/runtime/CL/functions/CLMinMaxLocation.cpp
index 3ddd4d04ed..ace6a1cb21 100644
--- a/src/runtime/CL/functions/CLMinMaxLocation.cpp
+++ b/src/runtime/CL/functions/CLMinMaxLocation.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/CL/functions/CLMinMaxLocation.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "src/core/CL/kernels/CLMinMaxLocationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLMinMaxLocation::CLMinMaxLocation()
- : _min_max_kernel(support::cpp14::make_unique<CLMinMaxKernel>()),
- _min_max_loc_kernel(support::cpp14::make_unique<CLMinMaxLocationKernel>()),
+ : _min_max_kernel(std::make_unique<CLMinMaxKernel>()),
+ _min_max_loc_kernel(std::make_unique<CLMinMaxLocationKernel>()),
_min_max_vals(),
_min_max_count_vals(),
_min(nullptr),
diff --git a/src/runtime/CL/functions/CLNonLinearFilter.cpp b/src/runtime/CL/functions/CLNonLinearFilter.cpp
index 3312f6f9a7..ec88f879b7 100644
--- a/src/runtime/CL/functions/CLNonLinearFilter.cpp
+++ b/src/runtime/CL/functions/CLNonLinearFilter.cpp
@@ -25,7 +25,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLNonLinearFilterKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -40,7 +39,7 @@ void CLNonLinearFilter::configure(ICLTensor *input, ICLTensor *output, NonLinear
void CLNonLinearFilter::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, NonLinearFilterFunction function, unsigned int mask_size, MatrixPattern pattern,
const uint8_t *mask, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLNonLinearFilterKernel>();
+ auto k = std::make_unique<CLNonLinearFilterKernel>();
k->configure(compile_context, input, output, function, mask_size, pattern, mask, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLNonMaximaSuppression3x3.cpp b/src/runtime/CL/functions/CLNonMaximaSuppression3x3.cpp
index 22ca176a71..5906ea5a4b 100644
--- a/src/runtime/CL/functions/CLNonMaximaSuppression3x3.cpp
+++ b/src/runtime/CL/functions/CLNonMaximaSuppression3x3.cpp
@@ -25,7 +25,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLNonMaximaSuppression3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -38,7 +37,7 @@ void CLNonMaximaSuppression3x3::configure(ICLTensor *input, ICLTensor *output, B
void CLNonMaximaSuppression3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, BorderMode border_mode)
{
- auto k = arm_compute::support::cpp14::make_unique<CLNonMaximaSuppression3x3Kernel>();
+ auto k = std::make_unique<CLNonMaximaSuppression3x3Kernel>();
k->configure(compile_context, input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
diff --git a/src/runtime/CL/functions/CLNormalizationLayer.cpp b/src/runtime/CL/functions/CLNormalizationLayer.cpp
index 40a6cdd2f4..ec6fa803f5 100644
--- a/src/runtime/CL/functions/CLNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLNormalizationLayer.cpp
@@ -32,13 +32,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLNormalizationLayerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLNormalizationLayer::CLNormalizationLayer()
- : _norm_kernel(support::cpp14::make_unique<CLNormalizationLayerKernel>()),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>())
+ : _norm_kernel(std::make_unique<CLNormalizationLayerKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLNormalizePlanarYUVLayer.cpp b/src/runtime/CL/functions/CLNormalizePlanarYUVLayer.cpp
index 9576486db0..70189a2cb6 100644
--- a/src/runtime/CL/functions/CLNormalizePlanarYUVLayer.cpp
+++ b/src/runtime/CL/functions/CLNormalizePlanarYUVLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h"
#include "src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -38,7 +37,7 @@ void CLNormalizePlanarYUVLayer::configure(const ICLTensor *input, ICLTensor *out
void CLNormalizePlanarYUVLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *std)
{
- auto k = arm_compute::support::cpp14::make_unique<CLNormalizePlanarYUVLayerKernel>();
+ auto k = std::make_unique<CLNormalizePlanarYUVLayerKernel>();
k->configure(compile_context, input, output, mean, std);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLOpticalFlow.cpp b/src/runtime/CL/functions/CLOpticalFlow.cpp
index fca6192296..76e0ac5f0b 100644
--- a/src/runtime/CL/functions/CLOpticalFlow.cpp
+++ b/src/runtime/CL/functions/CLOpticalFlow.cpp
@@ -34,7 +34,6 @@
#include "arm_compute/runtime/CL/functions/CLScharr3x3.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLLKTrackerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -43,7 +42,7 @@ CLOpticalFlow::CLOpticalFlow(std::shared_ptr<IMemoryManager> memory_manager) //
_tracker_init_kernel(),
_tracker_stage0_kernel(),
_tracker_stage1_kernel(),
- _tracker_finalize_kernel(support::cpp14::make_unique<CLLKTrackerFinalizeKernel>()),
+ _tracker_finalize_kernel(std::make_unique<CLLKTrackerFinalizeKernel>()),
_func_scharr(),
_scharr_gx(),
_scharr_gy(),
@@ -104,13 +103,13 @@ void CLOpticalFlow::configure(const CLCompileContext &compile_context, const CLP
_scharr_gy.resize(_num_levels);
// Create internal keypoint arrays
- _old_points_internal = arm_compute::support::cpp14::make_unique<CLLKInternalKeypointArray>(list_length);
+ _old_points_internal = std::make_unique<CLLKInternalKeypointArray>(list_length);
_old_points_internal->resize(list_length);
- _new_points_internal = arm_compute::support::cpp14::make_unique<CLLKInternalKeypointArray>(list_length);
+ _new_points_internal = std::make_unique<CLLKInternalKeypointArray>(list_length);
_new_points_internal->resize(list_length);
- _coefficient_table = arm_compute::support::cpp14::make_unique<CLCoefficientTableArray>(list_length);
+ _coefficient_table = std::make_unique<CLCoefficientTableArray>(list_length);
_coefficient_table->resize(list_length);
- _old_values = arm_compute::support::cpp14::make_unique<CLOldValueArray>(old_values_list_length);
+ _old_values = std::make_unique<CLOldValueArray>(old_values_list_length);
_old_values->resize(old_values_list_length);
_new_points->resize(list_length);
@@ -137,17 +136,17 @@ void CLOpticalFlow::configure(const CLCompileContext &compile_context, const CLP
_func_scharr[i].configure(compile_context, old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
// Init Lucas-Kanade init kernel
- _tracker_init_kernel.emplace_back(support::cpp14::make_unique<CLLKTrackerInitKernel>());
+ _tracker_init_kernel.emplace_back(std::make_unique<CLLKTrackerInitKernel>());
_tracker_init_kernel.back()->configure(compile_context, old_points, new_points_estimates, _old_points_internal.get(), _new_points_internal.get(), use_initial_estimate, i, _num_levels, pyr_scale);
// Init Lucas-Kanade stage0 kernel
- _tracker_stage0_kernel.emplace_back(support::cpp14::make_unique<CLLKTrackerStage0Kernel>());
+ _tracker_stage0_kernel.emplace_back(std::make_unique<CLLKTrackerStage0Kernel>());
_tracker_stage0_kernel.back()->configure(compile_context, old_ith_input, &_scharr_gx[i], &_scharr_gy[i],
_old_points_internal.get(), _new_points_internal.get(), _coefficient_table.get(), _old_values.get(),
window_dimension, i);
// Init Lucas-Kanade stage1 kernel
- _tracker_stage1_kernel.emplace_back(support::cpp14::make_unique<CLLKTrackerStage1Kernel>());
+ _tracker_stage1_kernel.emplace_back(std::make_unique<CLLKTrackerStage1Kernel>());
_tracker_stage1_kernel.back()->configure(compile_context, new_ith_input, _new_points_internal.get(), _coefficient_table.get(), _old_values.get(),
termination, epsilon, num_iterations, window_dimension, i);
diff --git a/src/runtime/CL/functions/CLPReluLayer.cpp b/src/runtime/CL/functions/CLPReluLayer.cpp
index 60cf4d1a2d..876b5de0f7 100644
--- a/src/runtime/CL/functions/CLPReluLayer.cpp
+++ b/src/runtime/CL/functions/CLPReluLayer.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLPReluLayer.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ CLPReluLayer::CLPReluLayer()
void CLPReluLayer::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *alpha, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>();
+ auto k = std::make_unique<CLArithmeticOperationKernel>();
k->configure(compile_context, ArithmeticOperation::PRELU, input, alpha, output);
_kernel = std::move(k);
}
@@ -63,7 +62,7 @@ struct CLPReluLayer::Impl
};
CLPReluLayer::CLPReluLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLPReluLayer::CLPReluLayer(CLPReluLayer &&) = default;
@@ -80,7 +79,7 @@ void CLPReluLayer::configure(const CLCompileContext &compile_context, ICLTensor
_impl->src_0 = input;
_impl->src_1 = alpha;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLPReluLayer>();
+ _impl->op = std::make_unique<experimental::CLPReluLayer>();
_impl->op->configure(compile_context, input->info(), alpha->info(), output->info());
}
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index 388b07b76e..8c5d529117 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/CL/functions/CLPadLayer.h"
#include "src/core/CL/kernels/CLCopyKernel.h"
#include "src/core/CL/kernels/CLPadLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLPadLayer::CLPadLayer()
- : _pad_kernel(support::cpp14::make_unique<CLPadLayerKernel>()),
- _copy_kernel(support::cpp14::make_unique<CLCopyKernel>()),
+ : _pad_kernel(std::make_unique<CLPadLayerKernel>()),
+ _copy_kernel(std::make_unique<CLCopyKernel>()),
_perform_pad(false)
{
}
diff --git a/src/runtime/CL/functions/CLPermute.cpp b/src/runtime/CL/functions/CLPermute.cpp
index f7f0bc4f5d..31b152c553 100644
--- a/src/runtime/CL/functions/CLPermute.cpp
+++ b/src/runtime/CL/functions/CLPermute.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "src/core/CL/kernels/CLPermuteKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -37,7 +36,7 @@ void CLPermute::configure(const ICLTensor *input, ICLTensor *output, const Permu
void CLPermute::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PermutationVector &perm)
{
- auto k = arm_compute::support::cpp14::make_unique<CLPermuteKernel>();
+ auto k = std::make_unique<CLPermuteKernel>();
k->configure(compile_context, input, output, perm);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLPhase.cpp b/src/runtime/CL/functions/CLPhase.cpp
index 6594cd5bac..b2ff5d05ca 100644
--- a/src/runtime/CL/functions/CLPhase.cpp
+++ b/src/runtime/CL/functions/CLPhase.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLPhase.h"
#include "src/core/CL/kernels/CLMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLPhase::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTen
void CLPhase::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, PhaseType phase_type)
{
- auto k = arm_compute::support::cpp14::make_unique<CLMagnitudePhaseKernel>();
+ auto k = std::make_unique<CLMagnitudePhaseKernel>();
k->configure(compile_context, input1, input2, nullptr, output, MagnitudeType::L1NORM, phase_type);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
index 12cc5d60af..a56018b397 100644
--- a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
+++ b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -56,14 +55,14 @@ ITensorPack select_border_input(ITensorPack &tensors)
namespace experimental
{
CLPixelWiseMultiplication::CLPixelWiseMultiplication()
- : _border_handler(support::cpp14::make_unique<CLFillBorderKernel>())
+ : _border_handler(std::make_unique<CLFillBorderKernel>())
{
}
void CLPixelWiseMultiplication::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLPixelWiseMultiplicationKernel>();
+ auto k = std::make_unique<CLPixelWiseMultiplicationKernel>();
k->configure(compile_context, input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
_kernel = std::move(k);
@@ -92,13 +91,13 @@ void CLPixelWiseMultiplication::run(ITensorPack &tensors)
}
CLComplexPixelWiseMultiplication::CLComplexPixelWiseMultiplication()
- : _border_handler(support::cpp14::make_unique<CLFillBorderKernel>())
+ : _border_handler(std::make_unique<CLFillBorderKernel>())
{
}
void CLComplexPixelWiseMultiplication::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLComplexPixelWiseMultiplicationKernel>();
+ auto k = std::make_unique<CLComplexPixelWiseMultiplicationKernel>();
k->configure(compile_context, input1, input2, output, act_info);
_kernel = std::move(k);
@@ -135,7 +134,7 @@ struct CLPixelWiseMultiplication::Impl
};
CLPixelWiseMultiplication::CLPixelWiseMultiplication()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLPixelWiseMultiplication::CLPixelWiseMultiplication(CLPixelWiseMultiplication &&) = default;
@@ -154,7 +153,7 @@ void CLPixelWiseMultiplication::configure(const CLCompileContext &compile_contex
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLPixelWiseMultiplication>();
+ _impl->op = std::make_unique<experimental::CLPixelWiseMultiplication>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), scale, overflow_policy, rounding_policy, act_info);
}
@@ -183,7 +182,7 @@ struct CLComplexPixelWiseMultiplication::Impl
};
CLComplexPixelWiseMultiplication::CLComplexPixelWiseMultiplication()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLComplexPixelWiseMultiplication::CLComplexPixelWiseMultiplication(CLComplexPixelWiseMultiplication &&) = default;
@@ -200,7 +199,7 @@ void CLComplexPixelWiseMultiplication::configure(const CLCompileContext &compile
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLComplexPixelWiseMultiplication>();
+ _impl->op = std::make_unique<experimental::CLComplexPixelWiseMultiplication>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info);
}
diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index 7f99aee9ba..f3a2dbdd51 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLPoolingLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -40,7 +39,7 @@ void CLPoolingLayer::configure(const CLCompileContext &compile_context, ICLTenso
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
// Configure pooling kernel
- auto k = arm_compute::support::cpp14::make_unique<CLPoolingLayerKernel>();
+ auto k = std::make_unique<CLPoolingLayerKernel>();
k->set_target(CLScheduler::get().target());
k->configure(compile_context, input, output, pool_info, indices);
_kernel = std::move(k);
diff --git a/src/runtime/CL/functions/CLPriorBoxLayer.cpp b/src/runtime/CL/functions/CLPriorBoxLayer.cpp
index 8cb971793e..5ace7c6d7a 100644
--- a/src/runtime/CL/functions/CLPriorBoxLayer.cpp
+++ b/src/runtime/CL/functions/CLPriorBoxLayer.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLPriorBoxLayerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -54,7 +53,7 @@ void CLPriorBoxLayer::configure(const CLCompileContext &compile_context, const I
_max = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info.max_sizes().size() * sizeof(float));
}
- auto k = arm_compute::support::cpp14::make_unique<CLPriorBoxLayerKernel>();
+ auto k = std::make_unique<CLPriorBoxLayerKernel>();
k->configure(compile_context, input1, input2, output, info, &_min, &_max, &_aspect_ratios);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLQLSTMLayer.cpp b/src/runtime/CL/functions/CLQLSTMLayer.cpp
index 54df5a0a5e..4395a39060 100644
--- a/src/runtime/CL/functions/CLQLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLQLSTMLayer.cpp
@@ -41,7 +41,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -97,21 +96,21 @@ void CLQLSTMLayer::TensorCopyKernel::run()
}
CLQLSTMLayer::CLQLSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _input_to_input_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _recurrent_to_input_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _input_to_forget_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _recurrent_to_forget_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _input_to_cell_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _recurrent_to_cell_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _input_to_output_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _recurrent_to_output_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
- _projection_reduction(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ : _input_to_input_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _recurrent_to_input_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _input_to_forget_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _recurrent_to_forget_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _input_to_cell_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _recurrent_to_cell_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _input_to_output_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _recurrent_to_output_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
+ _projection_reduction(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
_layer_norms(),
- _copy_output(support::cpp14::make_unique<CLCopyKernel>())
+ _copy_output(std::make_unique<CLCopyKernel>())
{
for(auto &norm : _layer_norms)
{
- norm = support::cpp14::make_unique<CLQLSTMLayerNormalizationKernel>();
+ norm = std::make_unique<CLQLSTMLayerNormalizationKernel>();
}
_memory_group = MemoryGroup(std::move(memory_manager));
diff --git a/src/runtime/CL/functions/CLQuantizationLayer.cpp b/src/runtime/CL/functions/CLQuantizationLayer.cpp
index f132547eb9..cb8cabef87 100644
--- a/src/runtime/CL/functions/CLQuantizationLayer.cpp
+++ b/src/runtime/CL/functions/CLQuantizationLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h"
#include "src/core/CL/kernels/CLQuantizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -35,7 +34,7 @@ void CLQuantizationLayer::configure(const ICLTensor *input, ICLTensor *output)
void CLQuantizationLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLQuantizationLayerKernel>();
+ auto k = std::make_unique<CLQuantizationLayerKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index be3e539f98..2a99ece388 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -41,14 +41,13 @@
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
CLRNNLayer::CLRNNLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation(), _fully_connected_kernel(), _copy_kernel(support::cpp14::make_unique<CLCopyKernel>()), _fully_connected_out(),
+ : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_kernel(), _activation(), _fully_connected_kernel(), _copy_kernel(std::make_unique<CLCopyKernel>()), _fully_connected_out(),
_gemm_output(), _add_output(), _is_prepared(false)
{
}
diff --git a/src/runtime/CL/functions/CLROIAlignLayer.cpp b/src/runtime/CL/functions/CLROIAlignLayer.cpp
index cf28a1a0fb..291ccff958 100644
--- a/src/runtime/CL/functions/CLROIAlignLayer.cpp
+++ b/src/runtime/CL/functions/CLROIAlignLayer.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/ICLArray.h"
#include "src/core/CL/kernels/CLROIAlignLayerKernel.h"
#include "src/core/CL/kernels/CLROIPoolingLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -45,7 +44,7 @@ void CLROIAlignLayer::configure(const ICLTensor *input, const ICLTensor *rois, I
void CLROIAlignLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
{
// Configure ROI pooling kernel
- auto k = arm_compute::support::cpp14::make_unique<CLROIAlignLayerKernel>();
+ auto k = std::make_unique<CLROIAlignLayerKernel>();
k->configure(compile_context, input, rois, output, pool_info);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLROIPoolingLayer.cpp b/src/runtime/CL/functions/CLROIPoolingLayer.cpp
index b0e6716cce..debc5eb24c 100644
--- a/src/runtime/CL/functions/CLROIPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLROIPoolingLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLROIPoolingLayer.h"
#include "arm_compute/core/CL/ICLArray.h"
#include "src/core/CL/kernels/CLROIPoolingLayerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -36,7 +35,7 @@ void CLROIPoolingLayer::configure(const ICLTensor *input, const ICLTensor *rois,
void CLROIPoolingLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *rois, ICLTensor *output, const ROIPoolingLayerInfo &pool_info)
{
// Configure ROI pooling kernel
- auto k = arm_compute::support::cpp14::make_unique<CLROIPoolingLayerKernel>();
+ auto k = std::make_unique<CLROIPoolingLayerKernel>();
k->configure(compile_context, input, rois, output, pool_info);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLRange.cpp b/src/runtime/CL/functions/CLRange.cpp
index 57b57bd305..d4735c875d 100644
--- a/src/runtime/CL/functions/CLRange.cpp
+++ b/src/runtime/CL/functions/CLRange.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLRangeKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -39,7 +38,7 @@ void CLRange::configure(ICLTensor *output, const float start, const float end, c
void CLRange::configure(const CLCompileContext &compile_context, ICLTensor *output, const float start, const float end, const float step)
{
- auto k = arm_compute::support::cpp14::make_unique<CLRangeKernel>();
+ auto k = std::make_unique<CLRangeKernel>();
k->set_target(CLScheduler::get().target());
k->configure(compile_context, output, start, end, step);
_kernel = std::move(k);
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index 7423f4bc87..f40d945944 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -34,7 +34,6 @@
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/Utils.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -224,7 +223,7 @@ void CLReductionOperation::configure(const CLCompileContext &compile_context, IC
_memory_group.manage(&_results_vector.back());
}
- _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
+ _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
_reduction_kernels_vector[0]->configure(compile_context, input, output_internal, axis, op, 0);
}
else
@@ -273,10 +272,10 @@ void CLReductionOperation::configure(const CLCompileContext &compile_context, IC
ARM_COMPUTE_ERROR("Not supported");
}
- _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
+ _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
_reduction_kernels_vector[0]->configure(compile_context, input, &_results_vector[0], axis, first_kernel_op);
- _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
+ _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
_border_handlers_vector[0]->configure(compile_context, input, _reduction_kernels_vector[0]->border_size(), BorderMode::CONSTANT, pixelValue);
// Apply ReductionOperation on intermediate stages
@@ -284,10 +283,10 @@ void CLReductionOperation::configure(const CLCompileContext &compile_context, IC
{
_memory_group.manage(&_results_vector[i]);
- _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
+ _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
_reduction_kernels_vector[i]->configure(compile_context, &_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
- _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
+ _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
_border_handlers_vector[i]->configure(compile_context, &_results_vector[i - 1], _reduction_kernels_vector[i]->border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[i - 1].allocator()->allocate();
@@ -302,10 +301,10 @@ void CLReductionOperation::configure(const CLCompileContext &compile_context, IC
_memory_group.manage(&_results_vector.back());
}
- _reduction_kernels_vector.emplace_back(support::cpp14::make_unique<CLReductionOperationKernel>());
+ _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
_reduction_kernels_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], output_internal, axis, last_kernel_op, input_width);
- _border_handlers_vector.emplace_back(support::cpp14::make_unique<CLFillBorderKernel>());
+ _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
_border_handlers_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage]->border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[last_stage - 1].allocator()->allocate();
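
Multi-stage functions such as the reduction above keep their kernels in vectors of std::unique_ptr and construct each stage in place with emplace_back. A small sketch of that idiom (Kernel here is a placeholder, not the library class):

    #include <memory>
    #include <vector>

    struct Kernel
    {
        void configure(int stage) { (void)stage; }
    };

    int main()
    {
        std::vector<std::unique_ptr<Kernel>> kernels;
        for(int stage = 0; stage < 3; ++stage)
        {
            // emplace_back a freshly created kernel, then configure the last element,
            // mirroring how the reduction stages are wired up above.
            kernels.emplace_back(std::make_unique<Kernel>());
            kernels.back()->configure(stage);
        }
        return 0;
    }
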
diff --git a/src/runtime/CL/functions/CLRemap.cpp b/src/runtime/CL/functions/CLRemap.cpp
index 6466c2843b..a4cfc60368 100644
--- a/src/runtime/CL/functions/CLRemap.cpp
+++ b/src/runtime/CL/functions/CLRemap.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/Validate.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLRemapKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -51,7 +50,7 @@ void CLRemap::configure(const CLCompileContext &compile_context, ICLTensor *inpu
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_y, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MSG(policy == InterpolationPolicy::AREA, "Area interpolation is not supported");
- auto k = arm_compute::support::cpp14::make_unique<CLRemapKernel>();
+ auto k = std::make_unique<CLRemapKernel>();
k->configure(compile_context, input, map_x, map_y, output, policy, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLReorgLayer.cpp b/src/runtime/CL/functions/CLReorgLayer.cpp
index 4b2f70334f..69b28abab3 100644
--- a/src/runtime/CL/functions/CLReorgLayer.cpp
+++ b/src/runtime/CL/functions/CLReorgLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CL/kernels/CLReorgLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -41,7 +40,7 @@ void CLReorgLayer::configure(ICLTensor *input, ICLTensor *output, int32_t stride
void CLReorgLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, int32_t stride)
{
- auto k = arm_compute::support::cpp14::make_unique<CLReorgLayerKernel>();
+ auto k = std::make_unique<CLReorgLayerKernel>();
k->configure(compile_context, input, output, stride);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLReshapeLayer.cpp b/src/runtime/CL/functions/CLReshapeLayer.cpp
index 5112064b23..9abaa1b4e1 100644
--- a/src/runtime/CL/functions/CLReshapeLayer.cpp
+++ b/src/runtime/CL/functions/CLReshapeLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "src/core/CL/kernels/CLReshapeLayerKernel.h"
-#include "support/MemorySupport.h"
/** [CLReshapeLayer snippet] **/
namespace arm_compute
@@ -34,7 +33,7 @@ namespace experimental
{
void CLReshape::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLReshapeLayerKernel>();
+ auto k = std::make_unique<CLReshapeLayerKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
@@ -53,7 +52,7 @@ struct CLReshapeLayer::Impl
};
CLReshapeLayer::CLReshapeLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -70,7 +69,7 @@ void CLReshapeLayer::configure(const CLCompileContext &compile_context, const IC
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLReshape>();
+ _impl->op = std::make_unique<experimental::CLReshape>();
_impl->op->configure(compile_context, input->info(), output->info());
}
diff --git a/src/runtime/CL/functions/CLReverse.cpp b/src/runtime/CL/functions/CLReverse.cpp
index b73d8de62e..2a845bae13 100644
--- a/src/runtime/CL/functions/CLReverse.cpp
+++ b/src/runtime/CL/functions/CLReverse.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLReverseKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLReverse::configure(const ICLTensor *input, ICLTensor *output, const ICLTe
void CLReverse::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
{
- auto k = arm_compute::support::cpp14::make_unique<CLReverseKernel>();
+ auto k = std::make_unique<CLReverseKernel>();
k->configure(compile_context, input, output, axis);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLScale.cpp b/src/runtime/CL/functions/CLScale.cpp
index 383b0cc305..6658957e07 100644
--- a/src/runtime/CL/functions/CLScale.cpp
+++ b/src/runtime/CL/functions/CLScale.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLScaleKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -46,7 +45,7 @@ void CLScale::configure(ICLTensor *input, ICLTensor *output, InterpolationPolicy
void CLScale::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ScaleKernelInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLScaleKernel>();
+ auto k = std::make_unique<CLScaleKernel>();
k->set_target(CLScheduler::get().target());
k->configure(compile_context, input, output, info);
_kernel = std::move(k);
diff --git a/src/runtime/CL/functions/CLScharr3x3.cpp b/src/runtime/CL/functions/CLScharr3x3.cpp
index e5d0d2d630..563ec19266 100644
--- a/src/runtime/CL/functions/CLScharr3x3.cpp
+++ b/src/runtime/CL/functions/CLScharr3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLScharr3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ void CLScharr3x3::configure(ICLTensor *input, ICLTensor *output_x, ICLTensor *ou
void CLScharr3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLScharr3x3Kernel>();
+ auto k = std::make_unique<CLScharr3x3Kernel>();
k->configure(compile_context, input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLSelect.cpp b/src/runtime/CL/functions/CLSelect.cpp
index 374da91b78..5ec18a032f 100644
--- a/src/runtime/CL/functions/CLSelect.cpp
+++ b/src/runtime/CL/functions/CLSelect.cpp
@@ -27,8 +27,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLSelectKernel.h"
-#include "support/MemorySupport.h"
-
using namespace arm_compute;
namespace arm_compute
@@ -40,7 +38,7 @@ void CLSelect::configure(const ICLTensor *c, const ICLTensor *x, const ICLTensor
void CLSelect::configure(const CLCompileContext &compile_context, const ICLTensor *c, const ICLTensor *x, const ICLTensor *y, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSelectKernel>();
+ auto k = std::make_unique<CLSelectKernel>();
k->configure(compile_context, c, x, y, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLSlice.cpp b/src/runtime/CL/functions/CLSlice.cpp
index 940540563a..7f39143dc7 100644
--- a/src/runtime/CL/functions/CLSlice.cpp
+++ b/src/runtime/CL/functions/CLSlice.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -40,7 +39,7 @@ void CLSlice::configure(const CLCompileContext &compile_context, const ITensorIn
// Get absolute end coordinates
const int32_t slice_end_mask = arm_compute::helpers::tensor_transform::construct_slice_end_mask(ends);
- auto k = arm_compute::support::cpp14::make_unique<CLStridedSliceKernel>();
+ auto k = std::make_unique<CLStridedSliceKernel>();
k->configure(compile_context, input, output, starts, ends, BiStrides(), 0, slice_end_mask, 0);
_kernel = std::move(k);
}
@@ -70,7 +69,7 @@ struct CLSlice::Impl
};
CLSlice::CLSlice()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
CLSlice::CLSlice(CLSlice &&) = default;
@@ -91,7 +90,7 @@ void CLSlice::configure(const CLCompileContext &compile_context, const ICLTensor
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLSlice>();
+ _impl->op = std::make_unique<experimental::CLSlice>();
_impl->op->configure(compile_context, input->info(), output->info(), starts, ends);
}
diff --git a/src/runtime/CL/functions/CLSobel3x3.cpp b/src/runtime/CL/functions/CLSobel3x3.cpp
index 78376f935a..6724c12a72 100644
--- a/src/runtime/CL/functions/CLSobel3x3.cpp
+++ b/src/runtime/CL/functions/CLSobel3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSobel3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -41,7 +40,7 @@ void CLSobel3x3::configure(ICLTensor *input, ICLTensor *output_x, ICLTensor *out
void CLSobel3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLSobel3x3Kernel>();
+ auto k = std::make_unique<CLSobel3x3Kernel>();
k->configure(compile_context, input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLSobel5x5.cpp b/src/runtime/CL/functions/CLSobel5x5.cpp
index fa5d8945fb..98f215794c 100644
--- a/src/runtime/CL/functions/CLSobel5x5.cpp
+++ b/src/runtime/CL/functions/CLSobel5x5.cpp
@@ -31,15 +31,14 @@
#include "arm_compute/runtime/ITensorAllocator.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSobel5x5Kernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLSobel5x5::CLSobel5x5(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _sobel_hor(support::cpp14::make_unique<CLSobel5x5HorKernel>()),
- _sobel_vert(support::cpp14::make_unique<CLSobel5x5VertKernel>()),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _sobel_hor(std::make_unique<CLSobel5x5HorKernel>()),
+ _sobel_vert(std::make_unique<CLSobel5x5VertKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>()),
_tmp_x(),
_tmp_y()
{
diff --git a/src/runtime/CL/functions/CLSobel7x7.cpp b/src/runtime/CL/functions/CLSobel7x7.cpp
index f462adb0ed..a3d63f98dd 100644
--- a/src/runtime/CL/functions/CLSobel7x7.cpp
+++ b/src/runtime/CL/functions/CLSobel7x7.cpp
@@ -31,15 +31,14 @@
#include "arm_compute/runtime/ITensorAllocator.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSobel7x7Kernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
CLSobel7x7::CLSobel7x7(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
- _sobel_hor(support::cpp14::make_unique<CLSobel7x7HorKernel>()),
- _sobel_vert(support::cpp14::make_unique<CLSobel7x7VertKernel>()),
- _border_handler(support::cpp14::make_unique<CLFillBorderKernel>()),
+ _sobel_hor(std::make_unique<CLSobel7x7HorKernel>()),
+ _sobel_vert(std::make_unique<CLSobel7x7VertKernel>()),
+ _border_handler(std::make_unique<CLFillBorderKernel>()),
_tmp_x(),
_tmp_y()
{
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 4caf91488e..93e63dd779 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -33,7 +33,6 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"
#include "src/core/helpers/SoftmaxHelpers.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -42,8 +41,8 @@ CLSoftmaxLayerGeneric<IS_LOG>::CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryMana
: _memory_group(std::move(memory_manager)),
_permute_input(),
_permute_output(),
- _max_shift_exp_sum_kernel(support::cpp14::make_unique<CLLogits1DMaxShiftExpSumKernel>()),
- _norm_kernel(support::cpp14::make_unique<CLLogits1DNormKernel>()),
+ _max_shift_exp_sum_kernel(std::make_unique<CLLogits1DMaxShiftExpSumKernel>()),
+ _norm_kernel(std::make_unique<CLLogits1DNormKernel>()),
_max(),
_sum(),
_tmp(),
diff --git a/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp b/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
index e83def5677..2db064af44 100644
--- a/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
+++ b/src/runtime/CL/functions/CLSpaceToBatchLayer.cpp
@@ -31,13 +31,12 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLMemsetKernel.h"
#include "src/core/CL/kernels/CLSpaceToBatchLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLSpaceToBatchLayer::CLSpaceToBatchLayer()
- : _space_to_batch_kernel(support::cpp14::make_unique<CLSpaceToBatchLayerKernel>()),
- _memset_kernel(support::cpp14::make_unique<CLMemsetKernel>()),
+ : _space_to_batch_kernel(std::make_unique<CLSpaceToBatchLayerKernel>()),
+ _memset_kernel(std::make_unique<CLMemsetKernel>()),
_has_padding(false)
{
}
diff --git a/src/runtime/CL/functions/CLSpaceToDepthLayer.cpp b/src/runtime/CL/functions/CLSpaceToDepthLayer.cpp
index db8c4953cc..842d5bc5cc 100644
--- a/src/runtime/CL/functions/CLSpaceToDepthLayer.cpp
+++ b/src/runtime/CL/functions/CLSpaceToDepthLayer.cpp
@@ -30,12 +30,11 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLSpaceToDepthLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLSpaceToDepthLayer::CLSpaceToDepthLayer()
- : _space_to_depth_kernel(support::cpp14::make_unique<CLSpaceToDepthLayerKernel>())
+ : _space_to_depth_kernel(std::make_unique<CLSpaceToDepthLayerKernel>())
{
}
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
index f4aa78a72d..3ef6a27675 100644
--- a/src/runtime/CL/functions/CLStackLayer.cpp
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -33,7 +33,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLStackLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -61,7 +60,7 @@ void CLStackLayer::configure(const CLCompileContext &compile_context, const std:
for(unsigned int i = 0; i < _num_inputs; i++)
{
- _stack_kernels.emplace_back(support::cpp14::make_unique<CLStackLayerKernel>());
+ _stack_kernels.emplace_back(std::make_unique<CLStackLayerKernel>());
_stack_kernels.back()->configure(compile_context, input[i], axis_u, i, _num_inputs, output);
}
}
diff --git a/src/runtime/CL/functions/CLStridedSlice.cpp b/src/runtime/CL/functions/CLStridedSlice.cpp
index 3f6814f5ce..fd3db9341a 100644
--- a/src/runtime/CL/functions/CLStridedSlice.cpp
+++ b/src/runtime/CL/functions/CLStridedSlice.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void CLStridedSlice::configure(const CLCompileContext &compile_context, const IT
const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
- auto k = arm_compute::support::cpp14::make_unique<CLStridedSliceKernel>();
+ auto k = std::make_unique<CLStridedSliceKernel>();
k->configure(compile_context, input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ struct CLStridedSlice::Impl
};
CLStridedSlice::CLStridedSlice(CLRuntimeContext *ctx)
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
_impl->ctx = ctx;
}
@@ -83,7 +82,7 @@ void CLStridedSlice::configure(const CLCompileContext &compile_context, const IC
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLStridedSlice>();
+ _impl->op = std::make_unique<experimental::CLStridedSlice>();
_impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}
diff --git a/src/runtime/CL/functions/CLTableLookup.cpp b/src/runtime/CL/functions/CLTableLookup.cpp
index 8282f37e4b..a4671f51bd 100644
--- a/src/runtime/CL/functions/CLTableLookup.cpp
+++ b/src/runtime/CL/functions/CLTableLookup.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLTableLookup.h"
#include "src/core/CL/kernels/CLTableLookupKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLTableLookup::configure(const ICLTensor *input, const ICLLut *lut, ICLTens
void CLTableLookup::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLLut *lut, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLTableLookupKernel>();
+ auto k = std::make_unique<CLTableLookupKernel>();
k->configure(compile_context, input, lut, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLThreshold.cpp b/src/runtime/CL/functions/CLThreshold.cpp
index 250f6f034f..901cfd8993 100644
--- a/src/runtime/CL/functions/CLThreshold.cpp
+++ b/src/runtime/CL/functions/CLThreshold.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLThreshold.h"
#include "src/core/CL/kernels/CLThresholdKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -42,7 +41,7 @@ void CLThreshold::configure(const ICLTensor *input, ICLTensor *output, const Thr
void CLThreshold::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const ThresholdKernelInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLThresholdKernel>();
+ auto k = std::make_unique<CLThresholdKernel>();
k->configure(compile_context, input, output, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLTile.cpp b/src/runtime/CL/functions/CLTile.cpp
index 8384e48baf..818f10f1ac 100644
--- a/src/runtime/CL/functions/CLTile.cpp
+++ b/src/runtime/CL/functions/CLTile.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLTile.h"
#include "src/core/CL/kernels/CLTileKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -35,7 +34,7 @@ void CLTile::configure(const ICLTensor *input, ICLTensor *output, const Multiple
void CLTile::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const Multiples &multiples)
{
- auto k = arm_compute::support::cpp14::make_unique<CLTileKernel>();
+ auto k = std::make_unique<CLTileKernel>();
k->configure(compile_context, input, output, multiples);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLTranspose.cpp b/src/runtime/CL/functions/CLTranspose.cpp
index 43fa7a012a..c74503f4c0 100644
--- a/src/runtime/CL/functions/CLTranspose.cpp
+++ b/src/runtime/CL/functions/CLTranspose.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLTranspose.h"
#include "src/core/CL/kernels/CLTransposeKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void CLTranspose::configure(const ICLTensor *input, ICLTensor *output)
void CLTranspose::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
+ auto k = std::make_unique<CLTransposeKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/functions/CLUpsampleLayer.cpp b/src/runtime/CL/functions/CLUpsampleLayer.cpp
index 10b4b76a5e..538f27f565 100644
--- a/src/runtime/CL/functions/CLUpsampleLayer.cpp
+++ b/src/runtime/CL/functions/CLUpsampleLayer.cpp
@@ -27,12 +27,11 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLUpsampleLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
CLUpsampleLayer::CLUpsampleLayer() // NOLINT
- : _upsample(support::cpp14::make_unique<CLUpsampleLayerKernel>()),
+ : _upsample(std::make_unique<CLUpsampleLayerKernel>()),
_output(nullptr)
{
}
diff --git a/src/runtime/CL/functions/CLWarpAffine.cpp b/src/runtime/CL/functions/CLWarpAffine.cpp
index 86e5a7bd86..9a22446cf6 100644
--- a/src/runtime/CL/functions/CLWarpAffine.cpp
+++ b/src/runtime/CL/functions/CLWarpAffine.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLWarpAffineKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -40,7 +39,7 @@ void CLWarpAffine::configure(ICLTensor *input, ICLTensor *output, const std::arr
void CLWarpAffine::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLWarpAffineKernel>();
+ auto k = std::make_unique<CLWarpAffineKernel>();
k->configure(compile_context, input, output, matrix, policy);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLWarpPerspective.cpp b/src/runtime/CL/functions/CLWarpPerspective.cpp
index 7e8bc5cdff..0ec6b42e75 100644
--- a/src/runtime/CL/functions/CLWarpPerspective.cpp
+++ b/src/runtime/CL/functions/CLWarpPerspective.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLWarpPerspectiveKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -40,7 +39,7 @@ void CLWarpPerspective::configure(ICLTensor *input, ICLTensor *output, const std
void CLWarpPerspective::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const std::array<float, 9> &matrix, InterpolationPolicy policy, BorderMode border_mode,
uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<CLWarpPerspectiveKernel>();
+ auto k = std::make_unique<CLWarpPerspectiveKernel>();
k->configure(compile_context, input, output, matrix, policy);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 7af42904e8..321466f05f 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -36,7 +36,6 @@
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -99,8 +98,8 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz
} // namespace
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(support::cpp14::make_unique<CLWinogradFilterTransformKernel>()),
- _output_transform(support::cpp14::make_unique<CLWinogradOutputTransformKernel>()), _input0(), _input1(), _batched_mm_output(), _original_weights(nullptr), _is_prepared(false)
+ : _memory_group(memory_manager), _batched_mm(memory_manager), _input_transform(), _filter_transform(std::make_unique<CLWinogradFilterTransformKernel>()),
+ _output_transform(std::make_unique<CLWinogradOutputTransformKernel>()), _input0(), _input1(), _batched_mm_output(), _original_weights(nullptr), _is_prepared(false)
{
}
diff --git a/src/runtime/CL/functions/CLWinogradInputTransform.cpp b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
index 308c41f714..6d5a692bc3 100644
--- a/src/runtime/CL/functions/CLWinogradInputTransform.cpp
+++ b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Error.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLWinogradInputTransformKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -38,7 +37,7 @@ void CLWinogradInputTransform::configure(ICLTensor *input, ICLTensor *output, co
void CLWinogradInputTransform::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
{
- auto k = arm_compute::support::cpp14::make_unique<CLWinogradInputTransformKernel>();
+ auto k = std::make_unique<CLWinogradInputTransformKernel>();
k->configure(compile_context, input, output, winograd_info);
_kernel = std::move(k);
_border_handler->configure(compile_context, input, _kernel->border_size(), BorderMode::CONSTANT, PixelValue());
diff --git a/src/runtime/CL/functions/CLYOLOLayer.cpp b/src/runtime/CL/functions/CLYOLOLayer.cpp
index 46bf220b0c..e21d9a7fbb 100644
--- a/src/runtime/CL/functions/CLYOLOLayer.cpp
+++ b/src/runtime/CL/functions/CLYOLOLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLYOLOLayerKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -36,7 +35,7 @@ void CLYOLOLayer::configure(ICLTensor *input, ICLTensor *output, const Activatio
void CLYOLOLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
{
- auto k = arm_compute::support::cpp14::make_unique<CLYOLOLayerKernel>();
+ auto k = std::make_unique<CLYOLOLayerKernel>();
k->configure(compile_context, input, output, act_info, num_classes);
_kernel = std::move(k);
}
diff --git a/src/runtime/CL/gemm/CLGEMMKernelSelection.h b/src/runtime/CL/gemm/CLGEMMKernelSelection.h
index f6fad7e4ff..69f8349d27 100644
--- a/src/runtime/CL/gemm/CLGEMMKernelSelection.h
+++ b/src/runtime/CL/gemm/CLGEMMKernelSelection.h
@@ -29,8 +29,6 @@
#include "src/runtime/CL/gemm/CLGEMMKernelSelectionMidgard.h"
#include "src/runtime/CL/gemm/CLGEMMKernelSelectionValhall.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
namespace cl_gemm
@@ -50,11 +48,11 @@ public:
switch(get_arch_from_target(gpu))
{
case GPUTarget::MIDGARD:
- return support::cpp14::make_unique<CLGEMMKernelSelectionMidgard>(gpu);
+ return std::make_unique<CLGEMMKernelSelectionMidgard>(gpu);
case GPUTarget::BIFROST:
- return support::cpp14::make_unique<CLGEMMKernelSelectionBifrost>(gpu);
+ return std::make_unique<CLGEMMKernelSelectionBifrost>(gpu);
case GPUTarget::VALHALL:
- return support::cpp14::make_unique<CLGEMMKernelSelectionValhall>(gpu);
+ return std::make_unique<CLGEMMKernelSelectionValhall>(gpu);
default:
ARM_COMPUTE_ERROR("Not supported GPU target");
}
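
The kernel-selection factory above is a plain switch: each branch returns a std::unique_ptr to a target-specific selector, relying on the derived-to-base conversion of std::unique_ptr. A reduced sketch under assumed names:

    #include <memory>
    #include <stdexcept>

    struct ISelector { virtual ~ISelector() = default; };
    struct MidgardSelector : ISelector { explicit MidgardSelector(int gpu) { (void)gpu; } };
    struct BifrostSelector : ISelector { explicit BifrostSelector(int gpu) { (void)gpu; } };

    enum class Arch { Midgard, Bifrost };

    std::unique_ptr<ISelector> create(Arch arch, int gpu)
    {
        switch(arch)
        {
            case Arch::Midgard:
                return std::make_unique<MidgardSelector>(gpu); // unique_ptr<Derived> converts to unique_ptr<Base>
            case Arch::Bifrost:
                return std::make_unique<BifrostSelector>(gpu);
            default:
                throw std::runtime_error("Not supported GPU target");
        }
    }
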
diff --git a/src/runtime/CPP/CPPScheduler.cpp b/src/runtime/CPP/CPPScheduler.cpp
index e6b0ec20b8..663cde7a21 100644
--- a/src/runtime/CPP/CPPScheduler.cpp
+++ b/src/runtime/CPP/CPPScheduler.cpp
@@ -28,13 +28,13 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "src/runtime/CPUUtils.h"
-#include "support/MemorySupport.h"
#include "support/Mutex.h"
#include <atomic>
#include <condition_variable>
#include <iostream>
#include <list>
+#include <memory>
#include <mutex>
#include <system_error>
#include <thread>
@@ -281,7 +281,7 @@ CPPScheduler &CPPScheduler::get()
}
CPPScheduler::CPPScheduler()
- : _impl(support::cpp14::make_unique<Impl>(num_threads_hint()))
+ : _impl(std::make_unique<Impl>(num_threads_hint()))
{
}
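
As in CPPScheduler above, once "support/MemorySupport.h" is dropped the translation unit has to pull in <memory> itself (directly or transitively), since std::unique_ptr and std::make_unique are declared there and need a C++14 compiler. In sketch form:

    // Requires C++14 or later, e.g. g++ -std=c++14 example.cpp
    #include <memory> // std::unique_ptr and std::make_unique live here

    int main()
    {
        auto value = std::make_unique<int>(42); // no project-local shim needed any more
        return *value == 42 ? 0 : 1;
    }
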
diff --git a/src/runtime/CPP/functions/CPPNonMaximumSuppression.cpp b/src/runtime/CPP/functions/CPPNonMaximumSuppression.cpp
index f9d2badd39..d0d0b1e98b 100644
--- a/src/runtime/CPP/functions/CPPNonMaximumSuppression.cpp
+++ b/src/runtime/CPP/functions/CPPNonMaximumSuppression.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h"
#include "arm_compute/core/CPP/kernels/CPPNonMaximumSuppressionKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -32,7 +31,7 @@ void CPPNonMaximumSuppression::configure(
const ITensor *bboxes, const ITensor *scores, ITensor *indices, unsigned int max_output_size,
const float score_threshold, const float nms_threshold)
{
- auto k = arm_compute::support::cpp14::make_unique<CPPNonMaximumSuppressionKernel>();
+ auto k = std::make_unique<CPPNonMaximumSuppressionKernel>();
k->configure(bboxes, scores, indices, max_output_size, score_threshold, nms_threshold);
_kernel = std::move(k);
}
diff --git a/src/runtime/CPP/functions/CPPPermute.cpp b/src/runtime/CPP/functions/CPPPermute.cpp
index 7ea1070160..76fa09f12b 100644
--- a/src/runtime/CPP/functions/CPPPermute.cpp
+++ b/src/runtime/CPP/functions/CPPPermute.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/core/CPP/kernels/CPPPermuteKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
void CPPPermute::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
{
- auto k = arm_compute::support::cpp14::make_unique<CPPPermuteKernel>();
+ auto k = std::make_unique<CPPPermuteKernel>();
k->configure(input, output, perm);
_kernel = std::move(k);
}
diff --git a/src/runtime/CPP/functions/CPPTopKV.cpp b/src/runtime/CPP/functions/CPPTopKV.cpp
index bd089ac680..2547e56a1d 100644
--- a/src/runtime/CPP/functions/CPPTopKV.cpp
+++ b/src/runtime/CPP/functions/CPPTopKV.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/CPP/functions/CPPTopKV.h"
#include "arm_compute/core/CPP/kernels/CPPTopKVKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void CPPTopKV::configure(const ITensor *predictions, const ITensor *targets, ITensor *output, const unsigned int k)
{
- auto kernel = arm_compute::support::cpp14::make_unique<CPPTopKVKernel>();
+ auto kernel = std::make_unique<CPPTopKVKernel>();
kernel->configure(predictions, targets, output, k);
_kernel = std::move(kernel);
}
diff --git a/src/runtime/CPP/functions/CPPUpsample.cpp b/src/runtime/CPP/functions/CPPUpsample.cpp
index 7dfc3b8136..3b4ba2ba42 100644
--- a/src/runtime/CPP/functions/CPPUpsample.cpp
+++ b/src/runtime/CPP/functions/CPPUpsample.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/CPP/functions/CPPUpsample.h"
#include "arm_compute/core/CPP/kernels/CPPUpsampleKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
void CPPUpsample::configure(const ITensor *input, ITensor *output, const PadStrideInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<CPPUpsampleKernel>();
+ auto k = std::make_unique<CPPUpsampleKernel>();
k->configure(input, output, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
index ec91027915..695331d743 100644
--- a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,6 +54,6 @@ void GCBufferAllocator::free(void *ptr)
std::unique_ptr<IMemoryRegion> GCBufferAllocator::make_region(size_t size, size_t alignment)
{
ARM_COMPUTE_UNUSED(alignment);
- return arm_compute::support::cpp14::make_unique<GCBufferMemoryRegion>(size);
+ return std::make_unique<GCBufferMemoryRegion>(size);
}
} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCRuntimeContext.cpp b/src/runtime/GLES_COMPUTE/GCRuntimeContext.cpp
index 6599f5296a..2ed78fe099 100644
--- a/src/runtime/GLES_COMPUTE/GCRuntimeContext.cpp
+++ b/src/runtime/GLES_COMPUTE/GCRuntimeContext.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,7 @@
namespace arm_compute
{
GCRuntimeContext::GCRuntimeContext()
- : _gpu_owned_scheduler(support::cpp14::make_unique<GCScheduler>()),
+ : _gpu_owned_scheduler(std::make_unique<GCScheduler>()),
_gpu_scheduler(_gpu_owned_scheduler.get()),
_core_context()
{
diff --git a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
index ff96c3cb83..b3344d8ecb 100644
--- a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -46,7 +45,7 @@ void GCTensorAllocator::allocate()
{
if(_associated_memory_group == nullptr)
{
- _memory.set_owned_region(support::cpp14::make_unique<GCBufferMemoryRegion>(info().total_size()));
+ _memory.set_owned_region(std::make_unique<GCBufferMemoryRegion>(info().total_size()));
}
else
{
diff --git a/src/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.cpp b/src/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.cpp
index 1b13143bde..29630c8981 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCAbsoluteDifference.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCAbsoluteDifferenceKernel.h"
#include "arm_compute/core/Helpers.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ using namespace arm_compute;
void GCAbsoluteDifference::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<GCAbsoluteDifferenceKernel>();
+ auto k = std::make_unique<GCAbsoluteDifferenceKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCActivationLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCActivationLayer.cpp
index a7ec758138..b3815f1625 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCActivationLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCActivationLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCActivationLayerKernel.h"
#include "arm_compute/core/Helpers.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ void GCActivationLayer::configure(IGCTensor *input, IGCTensor *output, Activatio
{
auto core_ctx = _ctx ? _ctx->core_runtime_context() : /* Legacy */ nullptr;
- auto k = arm_compute::support::cpp14::make_unique<GCActivationLayerKernel>(core_ctx);
+ auto k = std::make_unique<GCActivationLayerKernel>(core_ctx);
k->configure(input, output, act_info);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp b/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
index 580f8d573c..5661a9bfdd 100755
--- a/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCArithmeticAddition.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCArithmeticAdditionKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -33,7 +32,7 @@ using namespace arm_compute;
void GCArithmeticAddition::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<GCArithmeticAdditionKernel>();
+ auto k = std::make_unique<GCArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp
index 807412eb17..2c21d81e17 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCConcatenateLayer.cpp
@@ -31,8 +31,6 @@
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
GCConcatenateLayer::GCConcatenateLayer()
@@ -61,7 +59,7 @@ void GCConcatenateLayer::configure(std::vector<IGCTensor *> inputs_vector, IGCTe
{
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- auto kernel = support::cpp14::make_unique<GCDepthConcatenateLayerKernel>();
+ auto kernel = std::make_unique<GCDepthConcatenateLayerKernel>();
kernel->configure(inputs_vector.at(i), offset, output);
offset += inputs_vector.at(i)->info()->dimension(axis);
_concat_kernels.emplace_back(std::move(kernel));
diff --git a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
index 0d0526d5c9..93a66f012e 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include <cmath>
-#include <memory>
#include <tuple>
using namespace arm_compute;
diff --git a/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp
index 4ddd0ab4ca..46d5cc40d9 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCDepthwiseConvolutionLayer.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -40,7 +39,7 @@ void GCDepthwiseConvolutionLayer3x3::configure(IGCTensor *input, const IGCTensor
{
ARM_COMPUTE_ERROR_ON(dilation.x() != 1 || dilation.y() != 1);
ARM_COMPUTE_UNUSED(dilation);
- auto k = arm_compute::support::cpp14::make_unique<GCDepthwiseConvolutionLayer3x3Kernel>();
+ auto k = std::make_unique<GCDepthwiseConvolutionLayer3x3Kernel>();
k->configure(input, weights, biases, output, conv_info, depth_multiplier);
_kernel = std::move(k);
diff --git a/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
index c2aa81567e..63c963196a 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCDirectConvolutionLayer.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
@@ -46,19 +45,19 @@ void GCDirectConvolutionLayer::configure(IGCTensor *input, const IGCTensor *weig
if(kernel_size == 1)
{
- auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer1x1Kernel>();
+ auto k = std::make_unique<GCDirectConvolutionLayer1x1Kernel>();
k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
else if(kernel_size == 3)
{
- auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer3x3Kernel>();
+ auto k = std::make_unique<GCDirectConvolutionLayer3x3Kernel>();
k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
else if(kernel_size == 5)
{
- auto k = arm_compute::support::cpp14::make_unique<GCDirectConvolutionLayer5x5Kernel>();
+ auto k = std::make_unique<GCDirectConvolutionLayer5x5Kernel>();
k->configure(input, weights, biases, output, conv_info, act_info);
_kernel = std::move(k);
}
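
The kernel-size dispatch above illustrates the "simple function" idiom that most of these hunks touch: construct a concrete kernel with std::make_unique, configure it, then move ownership into the function's _kernel member. A condensed, self-contained sketch of that idiom follows; the stub type names are hypothetical stand-ins, not the real IGC/NE classes:

    // Condensed sketch of the "simple function" idiom; IKernelStub and
    // Conv3x3KernelStub are hypothetical stand-ins for the library's kernel types.
    #include <memory>
    #include <utility>

    struct IKernelStub
    {
        virtual ~IKernelStub() = default;
        virtual void run() = 0;
    };

    struct Conv3x3KernelStub : IKernelStub
    {
        void configure(int kernel_size) { (void)kernel_size; /* set up the kernel */ }
        void run() override { /* enqueue / execute the kernel */ }
    };

    class SimpleFunctionStub
    {
    public:
        void configure(int kernel_size)
        {
            auto k = std::make_unique<Conv3x3KernelStub>(); // C++14, no project shim
            k->configure(kernel_size);
            _kernel = std::move(k); // the function owns the configured kernel
        }
        void run() { _kernel->run(); }

    private:
        std::unique_ptr<IKernelStub> _kernel{ nullptr };
    };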
diff --git a/src/runtime/GLES_COMPUTE/functions/GCFillBorder.cpp b/src/runtime/GLES_COMPUTE/functions/GCFillBorder.cpp
index 080b5a22ac..97b4fd946c 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCFillBorder.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCFillBorder.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCFillBorderKernel.h"
#include "arm_compute/core/Helpers.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ using namespace arm_compute;
void GCFillBorder::configure(IGCTensor *tensor, unsigned int border_width, BorderMode border_mode, const PixelValue &constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<GCFillBorderKernel>();
+ auto k = std::make_unique<GCFillBorderKernel>();
k->configure(tensor, BorderSize(border_width), border_mode, constant_border_value);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
index 57a09edfd6..299a027b42 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "support/MemorySupport.h"
#include <algorithm>
@@ -33,7 +32,7 @@ using namespace arm_compute;
void GCFullyConnectedLayerReshapeWeights::configure(const IGCTensor *input, IGCTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<GCTransposeKernel>();
+ auto k = std::make_unique<GCTransposeKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.cpp b/src/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.cpp
index 1366a134aa..c1287f7e9c 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCGEMMInterleave4x4.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMInterleave4x4Kernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
void GCGEMMInterleave4x4::configure(const IGCTensor *input, IGCTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<GCGEMMInterleave4x4Kernel>();
+ auto k = std::make_unique<GCGEMMInterleave4x4Kernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCGEMMTranspose1xW.cpp b/src/runtime/GLES_COMPUTE/functions/GCGEMMTranspose1xW.cpp
index 877f81ae9b..d085357eaa 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCGEMMTranspose1xW.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCGEMMTranspose1xW.cpp
@@ -26,13 +26,12 @@
#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.h"
#include "arm_compute/core/Types.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
void GCGEMMTranspose1xW::configure(const IGCTensor *input, IGCTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<GCGEMMTranspose1xWKernel>();
+ auto k = std::make_unique<GCGEMMTranspose1xWKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp b/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
index daf978f3ac..ce50a63e53 100755
--- a/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCPixelWiseMultiplication.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCPixelWiseMultiplicationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -33,7 +32,7 @@ using namespace arm_compute;
void GCPixelWiseMultiplication::configure(const IGCTensor *input1, const IGCTensor *input2, IGCTensor *output, float scale, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<GCPixelWiseMultiplicationKernel>();
+ auto k = std::make_unique<GCPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output, scale);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCPoolingLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCPoolingLayer.cpp
index e4ccabc503..6a71fbebe7 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCPoolingLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCPoolingLayer.cpp
@@ -27,8 +27,6 @@
#include "arm_compute/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
GCPoolingLayer::GCPoolingLayer()
@@ -39,7 +37,7 @@ GCPoolingLayer::GCPoolingLayer()
void GCPoolingLayer::configure(IGCTensor *input, IGCTensor *output, const PoolingLayerInfo &pool_info, IGCTensor *indices)
{
// Configure pooling kernel
- auto k = arm_compute::support::cpp14::make_unique<GCPoolingLayerKernel>();
+ auto k = std::make_unique<GCPoolingLayerKernel>();
k->configure(input, output, pool_info, indices);
_kernel = std::move(k);
diff --git a/src/runtime/GLES_COMPUTE/functions/GCScale.cpp b/src/runtime/GLES_COMPUTE/functions/GCScale.cpp
index dccbe9960d..720006fead 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCScale.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCScale.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/GLES_COMPUTE/IGCTensor.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCScaleKernel.h"
#include "arm_compute/core/Validate.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -39,7 +38,7 @@ void GCScale::configure(IGCTensor *input, IGCTensor *output, InterpolationPolicy
void GCScale::configure(IGCTensor *input, IGCTensor *output, const ScaleKernelInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<GCScaleKernel>();
+ auto k = std::make_unique<GCScaleKernel>();
k->configure(input, output, info);
_kernel = std::move(k);
_border_handler.configure(input, _kernel->border_size(), info.border_mode, info.constant_border_value);
diff --git a/src/runtime/GLES_COMPUTE/functions/GCTensorShift.cpp b/src/runtime/GLES_COMPUTE/functions/GCTensorShift.cpp
index 4cbd2e3e8e..050dc7e9f5 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCTensorShift.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCTensorShift.cpp
@@ -28,13 +28,12 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
-#include "support/MemorySupport.h"
using namespace arm_compute;
void GCTensorShift::configure(IGCTensor *input)
{
- auto k = arm_compute::support::cpp14::make_unique<GCTensorShiftKernel>();
+ auto k = std::make_unique<GCTensorShiftKernel>();
k->configure(input);
_kernel = std::move(k);
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCTranspose.cpp b/src/runtime/GLES_COMPUTE/functions/GCTranspose.cpp
index da4471c925..14125e9db2 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCTranspose.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCTranspose.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/GLES_COMPUTE/functions/GCTranspose.h"
#include "arm_compute/core/GLES_COMPUTE/kernels/GCTransposeKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void GCTranspose::configure(const IGCTensor *input, IGCTensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<GCTransposeKernel>();
+ auto k = std::make_unique<GCTransposeKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEAbsoluteDifference.cpp b/src/runtime/NEON/functions/NEAbsoluteDifference.cpp
index df2bc7d72e..1c37af980e 100644
--- a/src/runtime/NEON/functions/NEAbsoluteDifference.cpp
+++ b/src/runtime/NEON/functions/NEAbsoluteDifference.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEAbsoluteDifference.h"
#include "src/core/NEON/kernels/NEAbsoluteDifferenceKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ NEAbsoluteDifference::~NEAbsoluteDifference() = default;
void NEAbsoluteDifference::configure(const ITensor *input1, const ITensor *input2, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEAbsoluteDifferenceKernel>();
+ auto k = std::make_unique<NEAbsoluteDifferenceKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEAccumulate.cpp b/src/runtime/NEON/functions/NEAccumulate.cpp
index 20eefd9d2d..b81ec24a39 100644
--- a/src/runtime/NEON/functions/NEAccumulate.cpp
+++ b/src/runtime/NEON/functions/NEAccumulate.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEAccumulate.h"
#include "src/core/NEON/kernels/NEAccumulateKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ NEAccumulate::~NEAccumulate() = default;
void NEAccumulate::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEAccumulateKernel>();
+ auto k = std::make_unique<NEAccumulateKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
@@ -45,13 +44,13 @@ void NEAccumulateWeighted::configure(const ITensor *input, float alpha, ITensor
{
if(use_fp16)
{
- auto k = arm_compute::support::cpp14::make_unique<NEAccumulateWeightedFP16Kernel>();
+ auto k = std::make_unique<NEAccumulateWeightedFP16Kernel>();
k->configure(input, alpha, output);
_kernel = std::move(k);
}
else
{
- auto k = arm_compute::support::cpp14::make_unique<NEAccumulateWeightedKernel>();
+ auto k = std::make_unique<NEAccumulateWeightedKernel>();
k->configure(input, alpha, output);
_kernel = std::move(k);
}
@@ -61,7 +60,7 @@ NEAccumulateSquared::~NEAccumulateSquared() = default;
void NEAccumulateSquared::configure(const ITensor *input, uint32_t shift, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEAccumulateSquaredKernel>();
+ auto k = std::make_unique<NEAccumulateSquaredKernel>();
k->configure(input, shift, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEActivationLayer.cpp b/src/runtime/NEON/functions/NEActivationLayer.cpp
index f9ad298e4d..27f01f67ce 100644
--- a/src/runtime/NEON/functions/NEActivationLayer.cpp
+++ b/src/runtime/NEON/functions/NEActivationLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/runtime/IRuntimeContext.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/NEON/kernels/NEActivationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ NEActivationLayer::~NEActivationLayer() = default;
void NEActivationLayer::configure(const ITensorInfo *input, ITensorInfo *output, const ActivationLayerInfo &activation_info)
{
- auto k = arm_compute::support::cpp14::make_unique<NEActivationLayerKernel>();
+ auto k = std::make_unique<NEActivationLayerKernel>();
k->configure(input, output, activation_info);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ struct NEActivationLayer::Impl
};
NEActivationLayer::NEActivationLayer(IRuntimeContext *ctx)
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
_impl->ctx = ctx;
}
@@ -76,7 +75,7 @@ void NEActivationLayer::configure(ITensor *input, ITensor *output, ActivationLay
_impl->src = input;
_impl->dst = output == nullptr ? input : output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEActivationLayer>();
+ _impl->op = std::make_unique<experimental::NEActivationLayer>();
_impl->op->configure(_impl->src->info(), _impl->dst->info(), activation_info);
}
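
NEActivationLayer above also shows the second recurring pattern in this patch: a pimpl-style Impl struct holding the user-facing tensors plus an operator configured on tensor-info objects, with the Impl itself now allocated via std::make_unique. A minimal sketch of that arrangement, using placeholder types rather than the real library interfaces, might look like:

    // Minimal sketch of the Impl ("pimpl") arrangement; TensorStub/OperatorStub are
    // placeholders, only the std::make_unique usage mirrors the code above.
    #include <memory>

    struct TensorStub { /* stand-in for arm_compute::ITensor */ };
    struct OperatorStub
    {
        void configure(const TensorStub *src, TensorStub *dst) { (void)src; (void)dst; }
    };

    class FunctionStub
    {
    private:
        struct Impl
        {
            const TensorStub             *src{ nullptr };
            TensorStub                   *dst{ nullptr };
            std::unique_ptr<OperatorStub> op{ nullptr };
        };
        std::unique_ptr<Impl> _impl;

    public:
        FunctionStub() : _impl(std::make_unique<Impl>()) {}

        void configure(TensorStub *input, TensorStub *output)
        {
            _impl->src = input;
            _impl->dst = (output == nullptr) ? input : output; // in-place if no output
            _impl->op  = std::make_unique<OperatorStub>();
            _impl->op->configure(_impl->src, _impl->dst);
        }
    };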
diff --git a/src/runtime/NEON/functions/NEArgMinMaxLayer.cpp b/src/runtime/NEON/functions/NEArgMinMaxLayer.cpp
index 2a9bb76c7f..7bca20d46c 100644
--- a/src/runtime/NEON/functions/NEArgMinMaxLayer.cpp
+++ b/src/runtime/NEON/functions/NEArgMinMaxLayer.cpp
@@ -31,14 +31,12 @@
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
NEArgMinMaxLayer::~NEArgMinMaxLayer() = default;
NEArgMinMaxLayer::NEArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _reduction_function(support::cpp14::make_unique<NEReductionOperation>())
+ : _reduction_function(std::make_unique<NEReductionOperation>())
{
ARM_COMPUTE_UNUSED(memory_manager);
}
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index 0bf9a09333..1eaccf3396 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/ITensor.h"
#include "src/core/NEON/kernels/NEArithmeticAdditionKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -38,7 +37,7 @@ NEArithmeticAddition::~NEArithmeticAddition() = default;
void NEArithmeticAddition::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticAdditionKernel>();
+ auto k = std::make_unique<NEArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ struct NEArithmeticAddition::Impl
};
NEArithmeticAddition::NEArithmeticAddition()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEArithmeticAddition::NEArithmeticAddition(NEArithmeticAddition &&) = default;
@@ -75,7 +74,7 @@ void NEArithmeticAddition::configure(const ITensor *input1, const ITensor *input
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEArithmeticAddition>();
+ _impl->op = std::make_unique<experimental::NEArithmeticAddition>();
_impl->op->configure(input1->info(), input2->info(), output->info(), policy, act_info);
}
diff --git a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
index ba3f426269..512cfd6f70 100644
--- a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/ITensor.h"
#include "src/core/NEON/kernels/NEArithmeticSubtractionKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -36,7 +35,7 @@ namespace experimental
void NEArithmeticSubtraction::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticSubtractionKernel>();
+ auto k = std::make_unique<NEArithmeticSubtractionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
@@ -57,7 +56,7 @@ struct NEArithmeticSubtraction::Impl
};
NEArithmeticSubtraction::NEArithmeticSubtraction()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEArithmeticSubtraction::NEArithmeticSubtraction(NEArithmeticSubtraction &&) = default;
@@ -74,7 +73,7 @@ void NEArithmeticSubtraction::configure(const ITensor *input1, const ITensor *in
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEArithmeticSubtraction>();
+ _impl->op = std::make_unique<experimental::NEArithmeticSubtraction>();
_impl->op->configure(input1->info(), input2->info(), output->info(), policy, act_info);
}
diff --git a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
index d0fdfcf101..b90a38b47f 100644
--- a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
@@ -31,8 +31,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEBatchNormalizationLayerKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
NEBatchNormalizationLayer::~NEBatchNormalizationLayer() = default;
@@ -46,7 +44,7 @@ void NEBatchNormalizationLayer::configure(ITensor *input, ITensor *output, const
ActivationLayerInfo act_info)
{
// Configure kernel
- _norm_kernel = arm_compute::support::cpp14::make_unique<NEBatchNormalizationLayerKernel>();
+ _norm_kernel = std::make_unique<NEBatchNormalizationLayerKernel>();
_norm_kernel->configure(input, output, mean, var, beta, gamma, epsilon, act_info);
}
diff --git a/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp b/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
index 77a63c0f63..8f537a650a 100644
--- a/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
@@ -30,20 +30,18 @@
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void NEBatchToSpaceLayer::configure(const ITensor *input, const ITensor *block_shape, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBatchToSpaceLayerKernel>();
+ auto k = std::make_unique<NEBatchToSpaceLayerKernel>();
k->configure(input, block_shape, output);
_kernel = std::move(k);
}
void NEBatchToSpaceLayer::configure(const ITensor *input, int32_t block_shape_x, int32_t block_shape_y, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBatchToSpaceLayerKernel>();
+ auto k = std::make_unique<NEBatchToSpaceLayerKernel>();
k->configure(input, block_shape_x, block_shape_y, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBitwiseAnd.cpp b/src/runtime/NEON/functions/NEBitwiseAnd.cpp
index f3b5220ccf..81c087988a 100644
--- a/src/runtime/NEON/functions/NEBitwiseAnd.cpp
+++ b/src/runtime/NEON/functions/NEBitwiseAnd.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEBitwiseAnd.h"
#include "src/core/NEON/kernels/NEBitwiseAndKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NEBitwiseAnd::configure(const ITensor *input1, const ITensor *input2, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBitwiseAndKernel>();
+ auto k = std::make_unique<NEBitwiseAndKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBitwiseNot.cpp b/src/runtime/NEON/functions/NEBitwiseNot.cpp
index 036584ea1a..3155df5db3 100644
--- a/src/runtime/NEON/functions/NEBitwiseNot.cpp
+++ b/src/runtime/NEON/functions/NEBitwiseNot.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEBitwiseNot.h"
#include "src/core/NEON/kernels/NEBitwiseNotKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NEBitwiseNot::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBitwiseNotKernel>();
+ auto k = std::make_unique<NEBitwiseNotKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBitwiseOr.cpp b/src/runtime/NEON/functions/NEBitwiseOr.cpp
index fc905a0919..793eb25d80 100644
--- a/src/runtime/NEON/functions/NEBitwiseOr.cpp
+++ b/src/runtime/NEON/functions/NEBitwiseOr.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEBitwiseOr.h"
#include "src/core/NEON/kernels/NEBitwiseOrKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NEBitwiseOr::configure(const ITensor *input1, const ITensor *input2, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBitwiseOrKernel>();
+ auto k = std::make_unique<NEBitwiseOrKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBitwiseXor.cpp b/src/runtime/NEON/functions/NEBitwiseXor.cpp
index 301a0c4659..2d0af63e35 100644
--- a/src/runtime/NEON/functions/NEBitwiseXor.cpp
+++ b/src/runtime/NEON/functions/NEBitwiseXor.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEBitwiseXor.h"
#include "src/core/NEON/kernels/NEBitwiseXorKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NEBitwiseXor::configure(const ITensor *input1, const ITensor *input2, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBitwiseXorKernel>();
+ auto k = std::make_unique<NEBitwiseXorKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBoundingBoxTransform.cpp b/src/runtime/NEON/functions/NEBoundingBoxTransform.cpp
index 0b639430b1..cfd14faca0 100644
--- a/src/runtime/NEON/functions/NEBoundingBoxTransform.cpp
+++ b/src/runtime/NEON/functions/NEBoundingBoxTransform.cpp
@@ -24,14 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEBoundingBoxTransform.h"
#include "src/core/NEON/kernels/NEBoundingBoxTransformKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void NEBoundingBoxTransform::configure(const ITensor *boxes, ITensor *pred_boxes, const ITensor *deltas, const BoundingBoxTransformInfo &info)
{
// Configure Bounding Box kernel
- auto k = arm_compute::support::cpp14::make_unique<NEBoundingBoxTransformKernel>();
+ auto k = std::make_unique<NEBoundingBoxTransformKernel>();
k->configure(boxes, pred_boxes, deltas, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEBox3x3.cpp b/src/runtime/NEON/functions/NEBox3x3.cpp
index 01d2356a4c..ee40e2c475 100644
--- a/src/runtime/NEON/functions/NEBox3x3.cpp
+++ b/src/runtime/NEON/functions/NEBox3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEBox3x3Kernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -36,17 +35,17 @@ void NEBox3x3::configure(ITensor *input, ITensor *output, BorderMode border_mode
{
if(use_fp16)
{
- auto k = arm_compute::support::cpp14::make_unique<NEBox3x3FP16Kernel>();
+ auto k = std::make_unique<NEBox3x3FP16Kernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
}
else
{
- auto k = arm_compute::support::cpp14::make_unique<NEBox3x3Kernel>();
+ auto k = std::make_unique<NEBox3x3Kernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
}
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NECannyEdge.cpp b/src/runtime/NEON/functions/NECannyEdge.cpp
index bf4f7d7933..52bc81e001 100644
--- a/src/runtime/NEON/functions/NECannyEdge.cpp
+++ b/src/runtime/NEON/functions/NECannyEdge.cpp
@@ -36,7 +36,6 @@
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NESobel5x5Kernel.h"
#include "src/core/NEON/kernels/NESobel7x7Kernel.h"
-#include "support/MemorySupport.h"
#include <cstring>
#include <inttypes.h>
@@ -105,19 +104,19 @@ void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr,
// Configure/Init sobelNxN
if(gradient_size == 3)
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel3x3>();
+ auto k = std::make_unique<NESobel3x3>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
else if(gradient_size == 5)
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel5x5>();
+ auto k = std::make_unique<NESobel5x5>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
else if(gradient_size == 7)
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel7x7>();
+ auto k = std::make_unique<NESobel7x7>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
}
@@ -131,7 +130,7 @@ void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr,
_memory_group.manage(&_phase);
// Configure gradient
- auto k = arm_compute::support::cpp14::make_unique<NEGradientKernel>();
+ auto k = std::make_unique<NEGradientKernel>();
k->configure(&_gx, &_gy, &_magnitude, &_phase, norm_type);
_gradient = std::move(k);
@@ -143,12 +142,12 @@ void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr,
_memory_group.manage(&_nonmax);
// Configure non-maxima suppression
- _non_max_suppr = arm_compute::support::cpp14::make_unique<NEEdgeNonMaxSuppressionKernel>();
+ _non_max_suppr = std::make_unique<NEEdgeNonMaxSuppressionKernel>();
_non_max_suppr->configure(&_magnitude, &_phase, &_nonmax, upper_thr, lower_thr, border_mode == BorderMode::UNDEFINED);
// Fill border around magnitude image as non-maxima suppression will access
// it. If border mode is undefined filling the border is a nop.
- _border_mag_gradient = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _border_mag_gradient = std::make_unique<NEFillBorderKernel>();
_border_mag_gradient->configure(&_magnitude, _non_max_suppr->border_size(), border_mode, constant_border_value);
// Allocate intermediate tensors
@@ -156,11 +155,11 @@ void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr,
_magnitude.allocator()->allocate();
// Configure edge tracing
- _edge_trace = arm_compute::support::cpp14::make_unique<NEEdgeTraceKernel>();
+ _edge_trace = std::make_unique<NEEdgeTraceKernel>();
_edge_trace->configure(&_nonmax, output);
// Fill border with "No edge" to stop recursion in edge trace
- _border_edge_trace = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _border_edge_trace = std::make_unique<NEFillBorderKernel>();
_border_edge_trace->configure(&_nonmax, _edge_trace->border_size(), BorderMode::CONSTANT, static_cast<float>(0.f));
// Allocate intermediate tensors
diff --git a/src/runtime/NEON/functions/NECast.cpp b/src/runtime/NEON/functions/NECast.cpp
index 7fd2605fd2..a42f512ce6 100644
--- a/src/runtime/NEON/functions/NECast.cpp
+++ b/src/runtime/NEON/functions/NECast.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace arm_compute
{
void NECast::configure(ITensor *input, ITensor *output, ConvertPolicy policy)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDepthConvertLayerKernel>();
+ auto k = std::make_unique<NEDepthConvertLayerKernel>();
k->configure(input, output, policy, 0);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEChannelCombine.cpp b/src/runtime/NEON/functions/NEChannelCombine.cpp
index f8a9be0313..b566153bf4 100644
--- a/src/runtime/NEON/functions/NEChannelCombine.cpp
+++ b/src/runtime/NEON/functions/NEChannelCombine.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEChannelCombine.h"
#include "src/core/NEON/kernels/NEChannelCombineKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,14 +31,14 @@ using namespace arm_compute;
void NEChannelCombine::configure(const ITensor *plane0, const ITensor *plane1, const ITensor *plane2, const ITensor *plane3, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEChannelCombineKernel>();
+ auto k = std::make_unique<NEChannelCombineKernel>();
k->configure(plane0, plane1, plane2, plane3, output);
_kernel = std::move(k);
}
void NEChannelCombine::configure(const IImage *plane0, const IImage *plane1, const IImage *plane2, IMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEChannelCombineKernel>();
+ auto k = std::make_unique<NEChannelCombineKernel>();
k->configure(plane0, plane1, plane2, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEChannelExtract.cpp b/src/runtime/NEON/functions/NEChannelExtract.cpp
index 8f5e4d47d9..a43dc28896 100644
--- a/src/runtime/NEON/functions/NEChannelExtract.cpp
+++ b/src/runtime/NEON/functions/NEChannelExtract.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEChannelExtract.h"
#include "src/core/NEON/kernels/NEChannelExtractKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,14 +31,14 @@ using namespace arm_compute;
void NEChannelExtract::configure(const ITensor *input, Channel channel, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEChannelExtractKernel>();
+ auto k = std::make_unique<NEChannelExtractKernel>();
k->configure(input, channel, output);
_kernel = std::move(k);
}
void NEChannelExtract::configure(const IMultiImage *input, Channel channel, IImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEChannelExtractKernel>();
+ auto k = std::make_unique<NEChannelExtractKernel>();
k->configure(input, channel, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEChannelShuffleLayer.cpp b/src/runtime/NEON/functions/NEChannelShuffleLayer.cpp
index c72dec67ee..bf4af83a0d 100644
--- a/src/runtime/NEON/functions/NEChannelShuffleLayer.cpp
+++ b/src/runtime/NEON/functions/NEChannelShuffleLayer.cpp
@@ -25,13 +25,12 @@
#include "arm_compute/core/Types.h"
#include "src/core/NEON/kernels/NEChannelShuffleLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEChannelShuffleLayer::configure(const ITensor *input, ITensor *output, unsigned int num_groups)
{
- auto k = arm_compute::support::cpp14::make_unique<NEChannelShuffleLayerKernel>();
+ auto k = std::make_unique<NEChannelShuffleLayerKernel>();
k->configure(input, output, num_groups);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NECol2Im.cpp b/src/runtime/NEON/functions/NECol2Im.cpp
index 0706125157..fc61520f47 100644
--- a/src/runtime/NEON/functions/NECol2Im.cpp
+++ b/src/runtime/NEON/functions/NECol2Im.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NECol2Im.h"
#include "src/core/NEON/kernels/NECol2ImKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NECol2Im::configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims)
{
- auto k = arm_compute::support::cpp14::make_unique<NECol2ImKernel>();
+ auto k = std::make_unique<NECol2ImKernel>();
k->configure(input, output, convolved_dims);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEColorConvert.cpp b/src/runtime/NEON/functions/NEColorConvert.cpp
index ebdd1046ce..c7c9cdd923 100644
--- a/src/runtime/NEON/functions/NEColorConvert.cpp
+++ b/src/runtime/NEON/functions/NEColorConvert.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEColorConvert.h"
#include "src/core/NEON/kernels/NEColorConvertKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,28 +31,28 @@ using namespace arm_compute;
void NEColorConvert::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEColorConvertKernel>();
+ auto k = std::make_unique<NEColorConvertKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
void NEColorConvert::configure(const IMultiImage *input, IImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEColorConvertKernel>();
+ auto k = std::make_unique<NEColorConvertKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
void NEColorConvert::configure(const IImage *input, IMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEColorConvertKernel>();
+ auto k = std::make_unique<NEColorConvertKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
void NEColorConvert::configure(const IMultiImage *input, IMultiImage *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEColorConvertKernel>();
+ auto k = std::make_unique<NEColorConvertKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEComputeAllAnchors.cpp b/src/runtime/NEON/functions/NEComputeAllAnchors.cpp
index 3f5712dd3a..a305ca0708 100644
--- a/src/runtime/NEON/functions/NEComputeAllAnchors.cpp
+++ b/src/runtime/NEON/functions/NEComputeAllAnchors.cpp
@@ -24,14 +24,13 @@
#include "arm_compute/runtime/NEON/functions/NEComputeAllAnchors.h"
#include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEComputeAllAnchors::configure(const ITensor *anchors, ITensor *all_anchors, const ComputeAnchorsInfo &info)
{
// Configure ComputeAllAnchors kernel
- auto k = arm_compute::support::cpp14::make_unique<NEComputeAllAnchorsKernel>();
+ auto k = std::make_unique<NEComputeAllAnchorsKernel>();
k->configure(anchors, all_anchors, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index 03a01aec6b..782f8f1ff7 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -36,7 +36,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -68,28 +67,28 @@ void NEConcatenation::configure(const std::vector<const ITensorInfo *> &inputs_v
{
case Window::DimX:
{
- auto kernel = support::cpp14::make_unique<NEWidthConcatenateLayerKernel>();
+ auto kernel = std::make_unique<NEWidthConcatenateLayerKernel>();
kernel->configure(inputs_vector.at(i), offset, output);
_concat_kernels.emplace_back(std::move(kernel));
break;
}
case Window::DimY:
{
- auto kernel = support::cpp14::make_unique<NEHeightConcatenateLayerKernel>();
+ auto kernel = std::make_unique<NEHeightConcatenateLayerKernel>();
kernel->configure(inputs_vector.at(i), offset, output);
_concat_kernels.emplace_back(std::move(kernel));
break;
}
case Window::DimZ:
{
- auto kernel = support::cpp14::make_unique<NEDepthConcatenateLayerKernel>();
+ auto kernel = std::make_unique<NEDepthConcatenateLayerKernel>();
kernel->configure(inputs_vector.at(i), offset, output);
_concat_kernels.emplace_back(std::move(kernel));
break;
}
case 3:
{
- auto kernel = support::cpp14::make_unique<NEBatchConcatenateLayerKernel>();
+ auto kernel = std::make_unique<NEBatchConcatenateLayerKernel>();
kernel->configure(inputs_vector.at(i), offset, output);
_concat_kernels.emplace_back(std::move(kernel));
break;
@@ -181,7 +180,7 @@ struct NEConcatenateLayer::Impl
};
NEConcatenateLayer::NEConcatenateLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -199,7 +198,7 @@ void NEConcatenateLayer::configure(std::vector<const ITensor *> inputs_vector, I
_impl->dst = output;
_impl->axis = axis;
_impl->num_inputs = inputs_vector.size();
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEConcatenation>();
+ _impl->op = std::make_unique<experimental::NEConcatenation>();
std::vector<const ITensorInfo *> inputs_vector_info;
for(unsigned int i = 0; i < inputs_vector.size(); ++i)
diff --git a/src/runtime/NEON/functions/NEConvertFullyConnectedWeights.cpp b/src/runtime/NEON/functions/NEConvertFullyConnectedWeights.cpp
index 291afe0273..a6a7746830 100644
--- a/src/runtime/NEON/functions/NEConvertFullyConnectedWeights.cpp
+++ b/src/runtime/NEON/functions/NEConvertFullyConnectedWeights.cpp
@@ -23,7 +23,6 @@
*/
#include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h"
#include "src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -37,7 +36,7 @@ NEConvertFullyConnectedWeights::NEConvertFullyConnectedWeights()
void NEConvertFullyConnectedWeights::configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape,
DataLayout data_layout)
{
- _kernel = arm_compute::support::cpp14::make_unique<NEConvertFullyConnectedWeightsKernel>();
+ _kernel = std::make_unique<NEConvertFullyConnectedWeightsKernel>();
_kernel->configure(input, output, original_input_shape, data_layout);
}
diff --git a/src/runtime/NEON/functions/NEConvolution.cpp b/src/runtime/NEON/functions/NEConvolution.cpp
index 07ac8bd42b..680d8f628f 100644
--- a/src/runtime/NEON/functions/NEConvolution.cpp
+++ b/src/runtime/NEON/functions/NEConvolution.cpp
@@ -34,7 +34,6 @@
#include "src/core/NEON/kernels/NEConvolutionKernel.h"
#include "src/core/NEON/kernels/NEConvolutionKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <array>
#include <utility>
@@ -45,11 +44,11 @@ NEConvolution3x3::~NEConvolution3x3() = default;
void NEConvolution3x3::configure(ITensor *input, ITensor *output, const int16_t *conv, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEConvolution3x3Kernel>();
+ auto k = std::make_unique<NEConvolution3x3Kernel>();
k->configure(input, output, conv, scale, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
@@ -76,7 +75,7 @@ void NEConvolutionSquare<matrix_size>::configure(ITensor *input, ITensor *output
_is_separable = separate_matrix(conv, conv_col.data(), conv_row.data(), matrix_size);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
if(_is_separable)
{
DataType intermediate_type = DataType::UNKNOWN;
@@ -93,8 +92,8 @@ void NEConvolutionSquare<matrix_size>::configure(ITensor *input, ITensor *output
scale = calculate_matrix_scale(conv, matrix_size);
}
- _kernel_hor = arm_compute::support::cpp14::make_unique<NESeparableConvolutionHorKernel<matrix_size>>();
- _kernel_vert = arm_compute::support::cpp14::make_unique<NESeparableConvolutionVertKernel<matrix_size>>();
+ _kernel_hor = std::make_unique<NESeparableConvolutionHorKernel<matrix_size>>();
+ _kernel_vert = std::make_unique<NESeparableConvolutionVertKernel<matrix_size>>();
_kernel_hor->configure(input, &_tmp, conv_row.data(), border_mode == BorderMode::UNDEFINED);
_kernel_vert->configure(&_tmp, output, conv_col.data(), scale, border_mode == BorderMode::UNDEFINED);
@@ -105,7 +104,7 @@ void NEConvolutionSquare<matrix_size>::configure(ITensor *input, ITensor *output
}
else
{
- _kernel = arm_compute::support::cpp14::make_unique<NEConvolutionKernel<matrix_size>>();
+ _kernel = std::make_unique<NEConvolutionKernel<matrix_size>>();
_kernel->configure(input, output, conv, scale, border_mode == BorderMode::UNDEFINED);
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
}
@@ -138,11 +137,11 @@ NEConvolutionRectangle::~NEConvolutionRectangle() = default;
void NEConvolutionRectangle::configure(ITensor *input, ITensor *output, const int16_t *conv, uint32_t rows, uint32_t cols, uint32_t scale, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEConvolutionRectangleKernel>();
+ auto k = std::make_unique<NEConvolutionRectangleKernel>();
k->configure(input, output, conv, rows, cols, scale, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index cc5f160787..cc549ca31b 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -33,8 +33,6 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
-#include "support/MemorySupport.h"
-
#include <cmath>
#include <tuple>
#include <utility>
@@ -61,35 +59,35 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
{
case ConvolutionMethod::WINOGRAD:
{
- auto f = arm_compute::support::cpp14::make_unique<NEWinogradConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<NEWinogradConvolutionLayer>(_memory_manager);
f->configure(input, weights, biases, output, conv_info, act_info, enable_fast_math);
_function = std::move(f);
break;
}
case ConvolutionMethod::GEMM:
{
- auto f = arm_compute::support::cpp14::make_unique<NEGEMMConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<NEGEMMConvolutionLayer>(_memory_manager);
f->configure(input, weights, biases, output, conv_info, weights_info, dilation, act_info);
_function = std::move(f);
break;
}
case ConvolutionMethod::GEMM_CONV2D:
{
- auto f = arm_compute::support::cpp14::make_unique<NEGEMMConv2d>(_memory_manager);
+ auto f = std::make_unique<NEGEMMConv2d>(_memory_manager);
f->configure(input, weights, biases, output, info);
_function = std::move(f);
break;
}
case ConvolutionMethod::DIRECT:
{
- auto f = arm_compute::support::cpp14::make_unique<NEDirectConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<NEDirectConvolutionLayer>(_memory_manager);
f->configure(input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
}
case ConvolutionMethod::FFT:
{
- auto f = arm_compute::support::cpp14::make_unique<NEFFTConvolutionLayer>(_memory_manager);
+ auto f = std::make_unique<NEFFTConvolutionLayer>(_memory_manager);
f->configure(input, weights, biases, output, conv_info, act_info);
_function = std::move(f);
break;
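
For context on how the dispatch above is reached, here is a hedged caller-side sketch (not part of the patch) of configuring and running NEConvolutionLayer; the shapes, data type, missing bias and pad/stride values are arbitrary illustration choices:

    // Hedged usage sketch: the function selects WINOGRAD/GEMM/DIRECT/FFT internally.
    #include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src{}, weights{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));

        NEConvolutionLayer conv{};
        conv.configure(&src, &weights, nullptr, &dst, PadStrideInfo(1, 1, 1, 1));

        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();

        conv.run();
        return 0;
    }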
diff --git a/src/runtime/NEON/functions/NECopy.cpp b/src/runtime/NEON/functions/NECopy.cpp
index 9e7bf40559..11707cbd4c 100644
--- a/src/runtime/NEON/functions/NECopy.cpp
+++ b/src/runtime/NEON/functions/NECopy.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NECopy.h"
#include "src/core/NEON/kernels/NECopyKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ NECopy::~NECopy() = default;
void NECopy::configure(ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NECopyKernel>();
+ auto k = std::make_unique<NECopyKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NECropResize.cpp b/src/runtime/NEON/functions/NECropResize.cpp
index 2e2d2251b6..af85cac7da 100644
--- a/src/runtime/NEON/functions/NECropResize.cpp
+++ b/src/runtime/NEON/functions/NECropResize.cpp
@@ -26,8 +26,6 @@
#include "arm_compute/runtime/NEON/functions/NECropResize.h"
#include "src/core/NEON/kernels/NECropKernel.h"
-#include "support/MemorySupport.h"
-
#include <cstddef>
namespace arm_compute
@@ -82,18 +80,18 @@ void NECropResize::configure(const ITensor *input, const ITensor *boxes, const I
for(unsigned int i = 0; i < _num_boxes; ++i)
{
- auto crop_tensor = support::cpp14::make_unique<Tensor>();
+ auto crop_tensor = std::make_unique<Tensor>();
TensorInfo crop_result_info(1, DataType::F32);
crop_result_info.set_data_layout(DataLayout::NHWC);
crop_tensor->allocator()->init(crop_result_info);
- auto scale_tensor = support::cpp14::make_unique<Tensor>();
+ auto scale_tensor = std::make_unique<Tensor>();
TensorInfo scaled_result_info(out_shape, 1, DataType::F32);
scaled_result_info.set_data_layout(DataLayout::NHWC);
scale_tensor->allocator()->init(scaled_result_info);
- auto crop_kernel = support::cpp14::make_unique<NECropKernel>();
- auto scale_kernel = support::cpp14::make_unique<NEScale>();
+ auto crop_kernel = std::make_unique<NECropKernel>();
+ auto scale_kernel = std::make_unique<NEScale>();
crop_kernel->configure(input, boxes, box_ind, crop_tensor.get(), i, _extrapolation_value);
_crop.emplace_back(std::move(crop_kernel));
diff --git a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
index af0f5efb69..761de8eb60 100644
--- a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NEDepthConvertLayer::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDepthConvertLayerKernel>();
+ auto k = std::make_unique<NEDepthConvertLayerKernel>();
k->configure(input, output, policy, shift);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEDepthToSpaceLayer.cpp b/src/runtime/NEON/functions/NEDepthToSpaceLayer.cpp
index c4f15e3b68..2793c3f27e 100644
--- a/src/runtime/NEON/functions/NEDepthToSpaceLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthToSpaceLayer.cpp
@@ -30,13 +30,11 @@
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEDepthToSpaceLayerKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void NEDepthToSpaceLayer::configure(const ITensor *input, ITensor *output, int32_t block_shape)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDepthToSpaceLayerKernel>();
+ auto k = std::make_unique<NEDepthToSpaceLayerKernel>();
k->configure(input, output, block_shape);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index fc97279211..d17f6b5cd9 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.h"
-#include "support/MemorySupport.h"
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;
@@ -246,7 +245,7 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerGeneric::configure(
}
_original_weights = weights_to_use;
- _depthwise_conv_kernel = arm_compute::support::cpp14::make_unique<NEDepthwiseConvolutionLayerNativeKernel>();
+ _depthwise_conv_kernel = std::make_unique<NEDepthwiseConvolutionLayerNativeKernel>();
_depthwise_conv_kernel->configure(input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier, dilation);
if(_is_nchw)
diff --git a/src/runtime/NEON/functions/NEDequantizationLayer.cpp b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
index 0c0f86c82b..a345840f4f 100644
--- a/src/runtime/NEON/functions/NEDequantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEDequantizationLayer.cpp
@@ -25,13 +25,12 @@
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "src/core/NEON/kernels/NEDequantizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEDequantizationLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDequantizationLayerKernel>();
+ auto k = std::make_unique<NEDequantizationLayerKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEDerivative.cpp b/src/runtime/NEON/functions/NEDerivative.cpp
index f007e9fda3..8ef42123db 100644
--- a/src/runtime/NEON/functions/NEDerivative.cpp
+++ b/src/runtime/NEON/functions/NEDerivative.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEDerivativeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -45,8 +44,8 @@ void NEDerivative::configure(ITensor *input, ITensor *output_x, ITensor *output_
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON((output_x == nullptr) && (output_y == nullptr));
- _kernel = arm_compute::support::cpp14::make_unique<NEDerivativeKernel>();
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _kernel = std::make_unique<NEDerivativeKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
_kernel->configure(input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_border_handler->configure(input, BorderSize(1), border_mode, PixelValue(constant_border_value));
diff --git a/src/runtime/NEON/functions/NEDilate.cpp b/src/runtime/NEON/functions/NEDilate.cpp
index 70c0b61639..56523abd8a 100644
--- a/src/runtime/NEON/functions/NEDilate.cpp
+++ b/src/runtime/NEON/functions/NEDilate.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEDilateKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ using namespace arm_compute;
void NEDilate::configure(ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDilateKernel>();
+ auto k = std::make_unique<NEDilateKernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
index 98d6386ffe..a953edc78f 100644
--- a/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDirectConvolutionLayer.cpp
@@ -30,7 +30,6 @@
#include "src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h"
#include "src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -45,9 +44,9 @@ NEDirectConvolutionLayer::NEDirectConvolutionLayer(std::shared_ptr<IMemoryManage
void NEDirectConvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::UNKNOWN);
- _output_stage_kernel = arm_compute::support::cpp14::make_unique<NEDirectConvolutionLayerOutputStageKernel>();
- _conv_kernel = arm_compute::support::cpp14::make_unique<NEDirectConvolutionLayerKernel>();
- _input_border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _output_stage_kernel = std::make_unique<NEDirectConvolutionLayerOutputStageKernel>();
+ _conv_kernel = std::make_unique<NEDirectConvolutionLayerKernel>();
+ _input_border_handler = std::make_unique<NEFillBorderKernel>();
// Free accumulator
if(_accumulator.buffer() != nullptr)
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index 7f3fe8b30b..badcf2e997 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -26,7 +26,6 @@
#include <src/core/NEON/kernels/NEElementwiseOperationKernel.h>
#include "arm_compute/core/ITensor.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -36,7 +35,7 @@ namespace experimental
{
void NEElementwiseMax::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
+ auto k = std::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MAX, input1, input2, output);
_kernel = std::move(k);
}
@@ -48,7 +47,7 @@ Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *
void NEElementwiseMin::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
+ auto k = std::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MIN, input1, input2, output);
_kernel = std::move(k);
}
@@ -60,7 +59,7 @@ Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *
void NEElementwiseSquaredDiff::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
+ auto k = std::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
_kernel = std::move(k);
}
@@ -72,7 +71,7 @@ Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITens
void NEElementwiseDivision::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEDivisionOperationKernel>();
+ auto k = std::make_unique<NEDivisionOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
@@ -84,7 +83,7 @@ Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorI
void NEElementwisePower::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEPowerOperationKernel>();
+ auto k = std::make_unique<NEPowerOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
@@ -97,7 +96,7 @@ Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo
template <ComparisonOperation COP>
void NEElementwiseComparisonStatic<COP>::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEComparisonOperationKernel>();
+ auto k = std::make_unique<NEComparisonOperationKernel>();
k->configure(COP, input1, input2, output);
_kernel = std::move(k);
}
@@ -110,7 +109,7 @@ Status NEElementwiseComparisonStatic<COP>::validate(const ITensorInfo *input1, c
void NEElementwiseComparison::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ComparisonOperation op)
{
- auto k = arm_compute::support::cpp14::make_unique<NEComparisonOperationKernel>();
+ auto k = std::make_unique<NEComparisonOperationKernel>();
k->configure(op, input1, input2, output);
_kernel = std::move(k);
}
@@ -138,7 +137,7 @@ struct NEElementwiseMax::Impl
};
NEElementwiseMax::NEElementwiseMax()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwiseMax::NEElementwiseMax(NEElementwiseMax &&) = default;
@@ -151,7 +150,7 @@ void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *outp
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseMax>();
+ _impl->op = std::make_unique<experimental::NEElementwiseMax>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -179,7 +178,7 @@ struct NEElementwiseMin::Impl
};
NEElementwiseMin::NEElementwiseMin()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwiseMin::NEElementwiseMin(NEElementwiseMin &&) = default;
@@ -192,7 +191,7 @@ void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *outp
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseMin>();
+ _impl->op = std::make_unique<experimental::NEElementwiseMin>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -220,7 +219,7 @@ struct NEElementwiseSquaredDiff::Impl
};
NEElementwiseSquaredDiff::NEElementwiseSquaredDiff()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwiseSquaredDiff::NEElementwiseSquaredDiff(NEElementwiseSquaredDiff &&) = default;
@@ -233,7 +232,7 @@ void NEElementwiseSquaredDiff::configure(ITensor *input1, ITensor *input2, ITens
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseSquaredDiff>();
+ _impl->op = std::make_unique<experimental::NEElementwiseSquaredDiff>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -261,7 +260,7 @@ struct NEElementwiseDivision::Impl
};
NEElementwiseDivision::NEElementwiseDivision()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwiseDivision::NEElementwiseDivision(NEElementwiseDivision &&) = default;
@@ -274,7 +273,7 @@ void NEElementwiseDivision::configure(ITensor *input1, ITensor *input2, ITensor
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseDivision>();
+ _impl->op = std::make_unique<experimental::NEElementwiseDivision>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -302,7 +301,7 @@ struct NEElementwisePower::Impl
};
NEElementwisePower::NEElementwisePower()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwisePower::NEElementwisePower(NEElementwisePower &&) = default;
@@ -315,7 +314,7 @@ void NEElementwisePower::configure(ITensor *input1, ITensor *input2, ITensor *ou
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwisePower>();
+ _impl->op = std::make_unique<experimental::NEElementwisePower>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -345,7 +344,7 @@ struct NEElementwiseComparisonStatic<COP>::Impl
template <ComparisonOperation COP>
NEElementwiseComparisonStatic<COP>::NEElementwiseComparisonStatic()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
template <ComparisonOperation COP>
@@ -361,7 +360,7 @@ void NEElementwiseComparisonStatic<COP>::configure(ITensor *input1, ITensor *inp
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseComparisonStatic<COP>>();
+ _impl->op = std::make_unique<experimental::NEElementwiseComparisonStatic<COP>>();
_impl->op->configure(input1->info(), input2->info(), output->info());
}
@@ -390,7 +389,7 @@ struct NEElementwiseComparison::Impl
};
NEElementwiseComparison::NEElementwiseComparison()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEElementwiseComparison::NEElementwiseComparison(NEElementwiseComparison &&) = default;
@@ -402,7 +401,7 @@ void NEElementwiseComparison::configure(ITensor *input1, ITensor *input2, ITenso
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseComparison>();
+ _impl->op = std::make_unique<experimental::NEElementwiseComparison>();
_impl->op->configure(input1->info(), input2->info(), output->info(), op);
}
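The hunks above all touch the same three-line wrapper: construct a kernel, configure it, then hand ownership to the function. A minimal, self-contained sketch of that before/after is shown here with a hypothetical FooKernel and FooFunction rather than any class from the library.

    // Configure-and-move kernel wrapper pattern, sketched with hypothetical types.
    #include <memory>
    #include <utility>

    struct FooKernel
    {
        void configure(int src, int dst) { (void)src; (void)dst; } // record source/destination
    };

    class FooFunction
    {
    public:
        void configure(int src, int dst)
        {
            // C++11 builds used arm_compute::support::cpp14::make_unique<FooKernel>() here;
            // with the default standard now C++14, std::make_unique is used directly.
            auto k = std::make_unique<FooKernel>();
            k->configure(src, dst);
            _kernel = std::move(k); // ownership transferred to the function object
        }

    private:
        std::unique_ptr<FooKernel> _kernel{};
    };

    int main()
    {
        FooFunction f;
        f.configure(0, 1);
        return 0;
    }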
diff --git a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
index 5e130205d2..5c779f1489 100644
--- a/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h"
#include "src/core/NEON/kernels/NEElementwiseUnaryKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ namespace arm_compute
{
void NERsqrtLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::RSQRT, input, output);
_kernel = std::move(k);
}
@@ -43,7 +42,7 @@ Status NERsqrtLayer::validate(const ITensorInfo *input, const ITensorInfo *outpu
void NEExpLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::EXP, input, output);
_kernel = std::move(k);
}
@@ -54,7 +53,7 @@ Status NEExpLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
void NENegLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::NEG, input, output);
_kernel = std::move(k);
}
@@ -65,7 +64,7 @@ Status NENegLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
void NELogLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::LOG, input, output);
_kernel = std::move(k);
}
@@ -76,7 +75,7 @@ Status NELogLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
void NEAbsLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::ABS, input, output);
_kernel = std::move(k);
}
@@ -87,7 +86,7 @@ Status NEAbsLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
void NERoundLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::ROUND, input, output);
_kernel = std::move(k);
}
@@ -98,7 +97,7 @@ Status NERoundLayer::validate(const ITensorInfo *input, const ITensorInfo *outpu
void NESinLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEElementwiseUnaryKernel>();
+ auto k = std::make_unique<NEElementwiseUnaryKernel>();
k->configure(ElementWiseUnary::SIN, input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEEqualizeHistogram.cpp b/src/runtime/NEON/functions/NEEqualizeHistogram.cpp
index d3ff171323..0b83b7dac7 100644
--- a/src/runtime/NEON/functions/NEEqualizeHistogram.cpp
+++ b/src/runtime/NEON/functions/NEEqualizeHistogram.cpp
@@ -32,7 +32,6 @@
#include "src/core/NEON/kernels/NEHistogramKernel.h"
#include "src/core/NEON/kernels/NEHistogramKernel.h"
#include "src/core/NEON/kernels/NETableLookupKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -50,9 +49,9 @@ void NEEqualizeHistogram::configure(const IImage *input, IImage *output)
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
- _histogram_kernel = arm_compute::support::cpp14::make_unique<NEHistogramKernel>();
- _cd_histogram_kernel = arm_compute::support::cpp14::make_unique<NECumulativeDistributionKernel>();
- _map_histogram_kernel = arm_compute::support::cpp14::make_unique<NETableLookupKernel>();
+ _histogram_kernel = std::make_unique<NEHistogramKernel>();
+ _cd_histogram_kernel = std::make_unique<NECumulativeDistributionKernel>();
+ _map_histogram_kernel = std::make_unique<NETableLookupKernel>();
// Configure kernels
_histogram_kernel->configure(input, &_hist);
diff --git a/src/runtime/NEON/functions/NEErode.cpp b/src/runtime/NEON/functions/NEErode.cpp
index 748694fe3f..83e266140a 100644
--- a/src/runtime/NEON/functions/NEErode.cpp
+++ b/src/runtime/NEON/functions/NEErode.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEErodeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ namespace arm_compute
{
void NEErode::configure(ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEErodeKernel>();
+ auto k = std::make_unique<NEErodeKernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEFFT1D.cpp b/src/runtime/NEON/functions/NEFFT1D.cpp
index b94c25832a..e72488f0f6 100644
--- a/src/runtime/NEON/functions/NEFFT1D.cpp
+++ b/src/runtime/NEON/functions/NEFFT1D.cpp
@@ -30,7 +30,6 @@
#include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
#include "src/core/NEON/kernels/NEFFTScaleKernel.h"
#include "src/core/utils/helpers/fft.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -64,7 +63,7 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
TensorInfo digit_reverse_indices_info(TensorShape(input->info()->tensor_shape()[config.axis]), 1, DataType::U32);
_digit_reverse_indices.allocator()->init(digit_reverse_indices_info);
_memory_group.manage(&_digit_reversed_input);
- _digit_reverse_kernel = arm_compute::support::cpp14::make_unique<NEFFTDigitReverseKernel>();
+ _digit_reverse_kernel = std::make_unique<NEFFTDigitReverseKernel>();
_digit_reverse_kernel->configure(input, &_digit_reversed_input, &_digit_reverse_indices, digit_reverse_config);
// Create and configure FFT kernels
@@ -82,7 +81,7 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
fft_kernel_info.radix = radix_for_stage;
fft_kernel_info.Nx = Nx;
fft_kernel_info.is_first_stage = (i == 0);
- _fft_kernels[i] = arm_compute::support::cpp14::make_unique<NEFFTRadixStageKernel>();
+ _fft_kernels[i] = std::make_unique<NEFFTRadixStageKernel>();
_fft_kernels[i]->configure(&_digit_reversed_input, ((i == (_num_ffts - 1)) && !is_c2r) ? output : nullptr, fft_kernel_info);
Nx *= radix_for_stage;
@@ -94,7 +93,7 @@ void NEFFT1D::configure(const ITensor *input, ITensor *output, const FFT1DInfo &
FFTScaleKernelInfo scale_config;
scale_config.scale = static_cast<float>(N);
scale_config.conjugate = config.direction == FFTDirection::Inverse;
- _scale_kernel = arm_compute::support::cpp14::make_unique<NEFFTScaleKernel>();
+ _scale_kernel = std::make_unique<NEFFTScaleKernel>();
is_c2r ? _scale_kernel->configure(&_digit_reversed_input, output, scale_config) : _scale_kernel->configure(output, nullptr, scale_config);
}
diff --git a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
index 23788b7c39..bb6b5ed6b4 100644
--- a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
@@ -36,8 +36,6 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/helpers/fft.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
namespace
@@ -161,7 +159,7 @@ void NEFFTConvolutionLayer::configure(ITensor *input, const ITensor *weights, co
_pad_weights_func.configure(&_flipped_weights, &_padded_weights, padding_w);
// Transform weights
- _transform_weights_func = support::cpp14::make_unique<NEFFT2D>();
+ _transform_weights_func = std::make_unique<NEFFT2D>();
_transform_weights_func->configure(&_padded_weights, &_transformed_weights, FFT2DInfo());
// Pad input
diff --git a/src/runtime/NEON/functions/NEFastCorners.cpp b/src/runtime/NEON/functions/NEFastCorners.cpp
index 1bde3cc508..5164d80947 100644
--- a/src/runtime/NEON/functions/NEFastCorners.cpp
+++ b/src/runtime/NEON/functions/NEFastCorners.cpp
@@ -35,7 +35,6 @@
#include "src/core/NEON/kernels/NEFillArrayKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -68,9 +67,9 @@ void NEFastCorners::configure(IImage *input, float threshold, bool nonmax_suppre
_output.allocator()->init(tensor_info);
_memory_group.manage(&_output);
- _fast_corners_kernel = arm_compute::support::cpp14::make_unique<NEFastCornersKernel>();
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
- _fill_kernel = arm_compute::support::cpp14::make_unique<NEFillArrayKernel>();
+ _fast_corners_kernel = std::make_unique<NEFastCornersKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
+ _fill_kernel = std::make_unique<NEFillArrayKernel>();
// If border is UNDEFINED _fast_corners_kernel will operate in xwindow (3,
// width - 3) and ywindow (3, height -3) so the output image will leave the
// pixels on the borders unchanged. This is reflected in the valid region
@@ -87,7 +86,7 @@ void NEFastCorners::configure(IImage *input, float threshold, bool nonmax_suppre
{
_suppressed.allocator()->init(tensor_info);
_memory_group.manage(&_suppressed);
- _nonmax_kernel = arm_compute::support::cpp14::make_unique<NENonMaximaSuppression3x3Kernel>();
+ _nonmax_kernel = std::make_unique<NENonMaximaSuppression3x3Kernel>();
_nonmax_kernel->configure(&_output, &_suppressed, BorderMode::UNDEFINED == border_mode);
_fill_kernel->configure(&_suppressed, 1 /* we keep all texels >0 */, corners);
diff --git a/src/runtime/NEON/functions/NEFill.cpp b/src/runtime/NEON/functions/NEFill.cpp
index 68292c9ee0..74e366ab49 100644
--- a/src/runtime/NEON/functions/NEFill.cpp
+++ b/src/runtime/NEON/functions/NEFill.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEMemsetKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,7 +33,7 @@ namespace arm_compute
{
void NEFill::configure(ITensor *tensor, PixelValue constant_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMemsetKernel>();
+ auto k = std::make_unique<NEMemsetKernel>();
k->configure(tensor, constant_value);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEFillBorder.cpp b/src/runtime/NEON/functions/NEFillBorder.cpp
index e96069f97c..bb57222eb4 100644
--- a/src/runtime/NEON/functions/NEFillBorder.cpp
+++ b/src/runtime/NEON/functions/NEFillBorder.cpp
@@ -26,13 +26,12 @@
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEFillBorder::configure(ITensor *input, unsigned int border_width, BorderMode border_mode, const PixelValue &constant_border_value)
{
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
_border_handler->configure(input, BorderSize(border_width), border_mode, constant_border_value);
}
diff --git a/src/runtime/NEON/functions/NEFlattenLayer.cpp b/src/runtime/NEON/functions/NEFlattenLayer.cpp
index 4dfe96325e..21e55665cd 100644
--- a/src/runtime/NEON/functions/NEFlattenLayer.cpp
+++ b/src/runtime/NEON/functions/NEFlattenLayer.cpp
@@ -25,13 +25,12 @@
#include "arm_compute/core/Size2D.h"
#include "src/core/NEON/kernels/NEFlattenLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEFlattenLayer::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEFlattenLayerKernel>();
+ auto k = std::make_unique<NEFlattenLayerKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEFloor.cpp b/src/runtime/NEON/functions/NEFloor.cpp
index 5f6bd61017..74149e6f24 100644
--- a/src/runtime/NEON/functions/NEFloor.cpp
+++ b/src/runtime/NEON/functions/NEFloor.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEFloor.h"
#include "src/core/NEON/kernels/NEFloorKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEFloor::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEFloorKernel>();
+ auto k = std::make_unique<NEFloorKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 6b0c27cf65..f12c410a59 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -43,8 +43,6 @@
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NETransposeKernel.h"
-#include "support/MemorySupport.h"
-
#include <algorithm>
#include <cmath>
@@ -148,7 +146,7 @@ Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const I
void NEFullyConnectedLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
+ auto k = std::make_unique<NETransposeKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
@@ -215,7 +213,7 @@ void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITenso
// Configure flatten kernel
_memory_group.manage(&_flatten_output);
- _flatten_kernel = arm_compute::support::cpp14::make_unique<NEFlattenLayerKernel>();
+ _flatten_kernel = std::make_unique<NEFlattenLayerKernel>();
_flatten_kernel->configure(input, &_flatten_output);
// Configure matrix multiply kernel
diff --git a/src/runtime/NEON/functions/NEFuseBatchNormalization.cpp b/src/runtime/NEON/functions/NEFuseBatchNormalization.cpp
index c64fde050e..a8ce6b2bfc 100644
--- a/src/runtime/NEON/functions/NEFuseBatchNormalization.cpp
+++ b/src/runtime/NEON/functions/NEFuseBatchNormalization.cpp
@@ -29,7 +29,6 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEFuseBatchNormalizationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -45,7 +44,7 @@ void NEFuseBatchNormalization::configure(const ITensor *input_weights, const ITe
const ITensor *input_bias, const ITensor *bn_beta, const ITensor *bn_gamma,
float epsilon, FuseBatchNormalizationType fbn_type)
{
- _fuse_bn_kernel = arm_compute::support::cpp14::make_unique<NEFuseBatchNormalizationKernel>();
+ _fuse_bn_kernel = std::make_unique<NEFuseBatchNormalizationKernel>();
_fuse_bn_kernel->configure(input_weights, bn_mean, bn_var, fused_weights, fused_bias, input_bias, bn_beta, bn_gamma, epsilon, fbn_type);
}
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 9f52e458d2..03f5aa37c1 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -39,7 +39,6 @@
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
#include <cmath>
@@ -110,7 +109,7 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
_memory_group.manage(&_tmp_d);
}
- _mm_kernel = arm_compute::support::cpp14::make_unique<NEGEMMMatrixMultiplyKernel>();
+ _mm_kernel = std::make_unique<NEGEMMMatrixMultiplyKernel>();
// Select between GEMV and GEMM
if(_run_vector_matrix_multiplication)
@@ -148,11 +147,11 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
int k = a->info()->dimension(0);
// Configure interleave kernel
- _interleave_kernel = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+ _interleave_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
_interleave_kernel->configure(a, &_tmp_a);
// Configure transpose kernel
- _transpose_kernel = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+ _transpose_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
_transpose_kernel->configure(b, &_tmp_b);
// Configure matrix multiplication kernel
@@ -176,7 +175,7 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
// Configure matrix addition kernel
if(_run_addition)
{
- _ma_kernel = arm_compute::support::cpp14::make_unique<NEGEMMMatrixAdditionKernel>();
+ _ma_kernel = std::make_unique<NEGEMMMatrixAdditionKernel>();
_ma_kernel->configure(c, d, beta);
}
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index f6739ee925..394f970e54 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -28,8 +28,6 @@
#include "src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"
#include "src/core/NEON/kernels/assembly/arm_gemm.hpp"
-#include "support/MemorySupport.h"
-
#include <arm_neon.h>
#include <cstdlib>
@@ -485,7 +483,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensor *a, c
}
// arm_compute wrapper for the Gemm object (see above)
- std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
+ std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = std::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
const size_t workspace_size = _gemm_kernel_asm->get_working_size();
@@ -691,7 +689,7 @@ void create_arm_gemm(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gem
arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads);
// Create arm_gemm fallback
- auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput>>();
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput>>();
fallback->configure(a, b, c, d, args, info, memory_group, weights_manager);
arm_gemm = std::move(fallback);
}
@@ -709,7 +707,7 @@ void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &a
arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads);
// Create arm_gemm fallback
- auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();
// Configure requantization info
const int32_t negation = info.negated_offsets ? 1 : -1;
diff --git a/src/runtime/NEON/functions/NEGEMMConv2d.cpp b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
index 642b084fb4..860b6bb4e1 100644
--- a/src/runtime/NEON/functions/NEGEMMConv2d.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConv2d.cpp
@@ -25,7 +25,9 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+
#include <set>
+
namespace arm_compute
{
namespace
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index 3f50f81af2..a3bdde24b0 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -43,7 +43,6 @@
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NEIm2ColKernel.h"
#include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
-#include "support/MemorySupport.h"
#include <set>
#include <tuple>
@@ -68,7 +67,7 @@ void NEConvolutionLayerReshapeWeights::configure(const ITensor *weights, const I
const bool append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
const ITensor *biases_to_use = (append_biases) ? biases : nullptr;
- _weights_reshape_kernel = arm_compute::support::cpp14::make_unique<NEWeightsReshapeKernel>();
+ _weights_reshape_kernel = std::make_unique<NEWeightsReshapeKernel>();
_weights_reshape_kernel->configure(weights, biases_to_use, output);
output->info()->set_quantization_info(weights->info()->quantization_info());
@@ -342,7 +341,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
_memory_group.manage(&_im2col_output);
// Configure
- _im2col_kernel = arm_compute::support::cpp14::make_unique<NEIm2ColKernel>();
+ _im2col_kernel = std::make_unique<NEIm2ColKernel>();
_im2col_kernel->configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, false, dilation);
// Update GEMM input
@@ -385,7 +384,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
if(_data_layout == DataLayout::NCHW)
{
// Configure col2im
- _col2im_kernel = arm_compute::support::cpp14::make_unique<NECol2ImKernel>();
+ _col2im_kernel = std::make_unique<NECol2ImKernel>();
_col2im_kernel->configure(gemm_output_to_use, output, Size2D(conv_w, conv_h));
}
else
diff --git a/src/runtime/NEON/functions/NEGEMMInterleave4x4.cpp b/src/runtime/NEON/functions/NEGEMMInterleave4x4.cpp
index 70fdcf492d..1e7a34bb35 100644
--- a/src/runtime/NEON/functions/NEGEMMInterleave4x4.cpp
+++ b/src/runtime/NEON/functions/NEGEMMInterleave4x4.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h"
#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEGEMMInterleave4x4::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+ auto k = std::make_unique<NEGEMMInterleave4x4Kernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index df8eaacf47..d8f9d08c13 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -43,8 +43,6 @@
#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
namespace
@@ -106,7 +104,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_signed_a.allocator()->init(a_to_use->info()->clone()->set_data_type(dt).set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction)));
_memory_group.manage(&_signed_a);
- _convert_to_signed_asymm = arm_compute::support::cpp14::make_unique<NEConvertQuantizedSignednessKernel>();
+ _convert_to_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
_convert_to_signed_asymm->configure(a_to_use, &_signed_a);
a_to_use = &_signed_a;
_a_offset = _signed_a.info()->quantization_info().uniform().offset;
@@ -182,11 +180,11 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
}
// Configure interleave kernel
- _mtx_a_reshape_kernel = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+ _mtx_a_reshape_kernel = std::make_unique<NEGEMMInterleave4x4Kernel>();
_mtx_a_reshape_kernel->configure(a_to_use, &_tmp_a);
// Configure transpose kernel
- _mtx_b_reshape_kernel = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+ _mtx_b_reshape_kernel = std::make_unique<NEGEMMTranspose1xWKernel>();
_mtx_b_reshape_kernel->configure(b, &_tmp_b);
}
@@ -207,7 +205,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
}
// Configure Matrix B reduction kernel
- _mtx_b_reduction_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixBReductionKernel>();
+ _mtx_b_reduction_kernel = std::make_unique<NEGEMMLowpMatrixBReductionKernel>();
_mtx_b_reduction_kernel->configure(b, &_vector_sum_col, reduction_info);
}
@@ -220,7 +218,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_memory_group.manage(&_vector_sum_row);
// Configure matrix A reduction kernel
- _mtx_a_reduction_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _mtx_a_reduction_kernel = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
_mtx_a_reduction_kernel->configure(a_to_use, &_vector_sum_row, reduction_info);
}
@@ -229,11 +227,11 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
// Configure matrix multiply kernel
if(!_assembly_path)
{
- _mm_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+ _mm_kernel = std::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
_mm_kernel->configure(matrix_a, matrix_b, &_mm_result_s32);
}
- _offset_contribution_output_stage_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpOffsetContributionOutputStageKernel>();
+ _offset_contribution_output_stage_kernel = std::make_unique<NEGEMMLowpOffsetContributionOutputStageKernel>();
_offset_contribution_output_stage_kernel->configure(&_mm_result_s32,
_a_offset == 0 ? nullptr : &_vector_sum_col,
_b_offset == 0 ? nullptr : &_vector_sum_row, c,
@@ -243,7 +241,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
if(_flip_signedness)
{
- _convert_from_signed_asymm = arm_compute::support::cpp14::make_unique<NEConvertQuantizedSignednessKernel>();
+ _convert_from_signed_asymm = std::make_unique<NEConvertQuantizedSignednessKernel>();
_convert_from_signed_asymm->configure(&_signed_output, output);
}
}
@@ -252,11 +250,11 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
// Configure matrix multiply kernel
if(!_assembly_path)
{
- _mm_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+ _mm_kernel = std::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
_mm_kernel->configure(matrix_a, matrix_b, output);
}
// Configure offset contribution kernel
- _offset_contribution_kernel = arm_compute::support::cpp14::make_unique<NEGEMMLowpOffsetContributionKernel>();
+ _offset_contribution_kernel = std::make_unique<NEGEMMLowpOffsetContributionKernel>();
_offset_contribution_kernel->configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a_to_use->info()->dimension(0), _a_offset, _b_offset);
}
diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
index 9fb8851d7a..807785a534 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
@@ -29,7 +29,6 @@
#include "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::~NEGEMMLowpQuantizeDownInt3
void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
int result_offset_after_shift, int min, int max)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
_kernel = std::move(k);
}
@@ -53,7 +52,7 @@ NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::~NEGEMMLowpQuantizeDownInt32
void NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
int result_offset_after_shift, int min, int max)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>();
k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
_kernel = std::move(k);
}
@@ -67,7 +66,7 @@ NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::~NEGEMMLowpQuantizeDownInt3
void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int min, int max)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, min, max);
_kernel = std::move(k);
}
@@ -93,21 +92,21 @@ void NEGEMMLowpOutputStage::configure(const ITensor *input, const ITensor *bias,
{
case DataType::QASYMM8:
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound);
_kernel = std::move(k);
break;
}
case DataType::QASYMM8_SIGNED:
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>();
k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_offset, info.gemmlowp_min_bound, info.gemmlowp_max_bound);
_kernel = std::move(k);
break;
}
case DataType::QSYMM16:
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
k->configure(input, bias, output, info.gemmlowp_multiplier, info.gemmlowp_shift, info.gemmlowp_min_bound, info.gemmlowp_max_bound);
_kernel = std::move(k);
break;
@@ -127,7 +126,7 @@ void NEGEMMLowpOutputStage::configure(const ITensor *input, const ITensor *bias,
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ScaleKernel>();
+ auto k = std::make_unique<NEGEMMLowpQuantizeDownInt32ScaleKernel>();
k->configure(input, bias, output, &info);
_kernel = std::move(k);
break;
diff --git a/src/runtime/NEON/functions/NEGEMMTranspose1xW.cpp b/src/runtime/NEON/functions/NEGEMMTranspose1xW.cpp
index 90cf0bab07..0408cfa585 100644
--- a/src/runtime/NEON/functions/NEGEMMTranspose1xW.cpp
+++ b/src/runtime/NEON/functions/NEGEMMTranspose1xW.cpp
@@ -28,13 +28,12 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEGEMMTranspose1xW::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+ auto k = std::make_unique<NEGEMMTranspose1xWKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEGather.cpp b/src/runtime/NEON/functions/NEGather.cpp
index 5c0dae1507..86cbfd187a 100644
--- a/src/runtime/NEON/functions/NEGather.cpp
+++ b/src/runtime/NEON/functions/NEGather.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEGather.h"
#include "src/core/NEON/kernels/NEGatherKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ namespace arm_compute
{
void NEGather::configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGatherKernel>();
+ auto k = std::make_unique<NEGatherKernel>();
k->configure(input, indices, output, axis);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEGaussian3x3.cpp b/src/runtime/NEON/functions/NEGaussian3x3.cpp
index 5290de1348..93e813c052 100644
--- a/src/runtime/NEON/functions/NEGaussian3x3.cpp
+++ b/src/runtime/NEON/functions/NEGaussian3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEGaussian3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ namespace arm_compute
{
void NEGaussian3x3::configure(ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEGaussian3x3Kernel>();
+ auto k = std::make_unique<NEGaussian3x3Kernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEGaussian5x5.cpp b/src/runtime/NEON/functions/NEGaussian5x5.cpp
index 7857710462..ed7e83b937 100644
--- a/src/runtime/NEON/functions/NEGaussian5x5.cpp
+++ b/src/runtime/NEON/functions/NEGaussian5x5.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEGaussian5x5Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -50,9 +49,9 @@ void NEGaussian5x5::configure(ITensor *input, ITensor *output, BorderMode border
// Manage intermediate buffers
_memory_group.manage(&_tmp);
- _kernel_hor = arm_compute::support::cpp14::make_unique<NEGaussian5x5HorKernel>();
- _kernel_vert = arm_compute::support::cpp14::make_unique<NEGaussian5x5VertKernel>();
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _kernel_hor = std::make_unique<NEGaussian5x5HorKernel>();
+ _kernel_vert = std::make_unique<NEGaussian5x5VertKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
// Create and configure kernels for the two passes
_kernel_hor->configure(input, &_tmp, border_mode == BorderMode::UNDEFINED);
diff --git a/src/runtime/NEON/functions/NEGaussianPyramid.cpp b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
index 30fe70f0ab..c9a36fc466 100644
--- a/src/runtime/NEON/functions/NEGaussianPyramid.cpp
+++ b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
@@ -36,7 +36,6 @@
#include "src/core/NEON/kernels/NEGaussian5x5Kernel.h"
#include "src/core/NEON/kernels/NEGaussianPyramidKernel.h"
#include "src/core/NEON/kernels/NEScaleKernel.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -98,19 +97,19 @@ void NEGaussianPyramidHalf::configure(const ITensor *input, IPyramid *pyramid, B
for(size_t i = 0; i < num_stages; ++i)
{
/* Configure horizontal kernel */
- _horizontal_reduction[i] = arm_compute::support::cpp14::make_unique<NEGaussianPyramidHorKernel>();
+ _horizontal_reduction[i] = std::make_unique<NEGaussianPyramidHorKernel>();
_horizontal_reduction[i]->configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
/* Configure vertical kernel */
- _vertical_reduction[i] = arm_compute::support::cpp14::make_unique<NEGaussianPyramidVertKernel>();
+ _vertical_reduction[i] = std::make_unique<NEGaussianPyramidVertKernel>();
_vertical_reduction[i]->configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
/* Configure border */
- _horizontal_border_handler[i] = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _horizontal_border_handler[i] = std::make_unique<NEFillBorderKernel>();
_horizontal_border_handler[i]->configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i]->border_size(), border_mode, PixelValue(constant_border_value));
/* Configure border */
- _vertical_border_handler[i] = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _vertical_border_handler[i] = std::make_unique<NEFillBorderKernel>();
_vertical_border_handler[i]->configure(_tmp.get_pyramid_level(i), _vertical_reduction[i]->border_size(), border_mode, PixelValue(pixel_value_u16));
}
diff --git a/src/runtime/NEON/functions/NEHOGDescriptor.cpp b/src/runtime/NEON/functions/NEHOGDescriptor.cpp
index 689e64fae7..bb125a1eae 100644
--- a/src/runtime/NEON/functions/NEHOGDescriptor.cpp
+++ b/src/runtime/NEON/functions/NEHOGDescriptor.cpp
@@ -31,7 +31,6 @@
#include "src/core/NEON/kernels/NEDerivativeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEHOGDescriptorKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -88,11 +87,11 @@ void NEHOGDescriptor::configure(ITensor *input, ITensor *output, const IHOG *hog
_memory_group.manage(&_hog_space);
// Initialise orientation binning kernel
- _orient_bin = arm_compute::support::cpp14::make_unique<NEHOGOrientationBinningKernel>();
+ _orient_bin = std::make_unique<NEHOGOrientationBinningKernel>();
_orient_bin->configure(&_mag, &_phase, &_hog_space, hog->info());
// Initialize HOG norm kernel
- _block_norm = arm_compute::support::cpp14::make_unique<NEHOGBlockNormalizationKernel>();
+ _block_norm = std::make_unique<NEHOGBlockNormalizationKernel>();
_block_norm->configure(&_hog_space, output, hog->info());
// Allocate intermediate tensors
diff --git a/src/runtime/NEON/functions/NEHOGDetector.cpp b/src/runtime/NEON/functions/NEHOGDetector.cpp
index 8468b75f4e..3eda1b0ce0 100644
--- a/src/runtime/NEON/functions/NEHOGDetector.cpp
+++ b/src/runtime/NEON/functions/NEHOGDetector.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEHOGDetector.h"
#include "src/core/NEON/kernels/NEHOGDetectorKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -32,7 +31,7 @@ NEHOGDetector::~NEHOGDetector() = default;
void NEHOGDetector::configure(const ITensor *input, const IHOG *hog, IDetectionWindowArray *detection_windows, const Size2D &detection_window_stride, float threshold, size_t idx_class)
{
- auto k = arm_compute::support::cpp14::make_unique<NEHOGDetectorKernel>();
+ auto k = std::make_unique<NEHOGDetectorKernel>();
k->configure(input, hog, detection_windows, detection_window_stride, threshold, idx_class);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEHOGGradient.cpp b/src/runtime/NEON/functions/NEHOGGradient.cpp
index 7d794bc1a0..f5a47735a9 100644
--- a/src/runtime/NEON/functions/NEHOGGradient.cpp
+++ b/src/runtime/NEON/functions/NEHOGGradient.cpp
@@ -28,7 +28,6 @@
#include "src/core/NEON/kernels/NEDerivativeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -66,13 +65,13 @@ void NEHOGGradient::configure(ITensor *input, ITensor *output_magnitude, ITensor
// Initialise magnitude/phase kernel
if(PhaseType::UNSIGNED == phase_type)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::UNSIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::UNSIGNED>>();
k->configure(&_gx, &_gy, output_magnitude, output_phase);
_mag_phase = std::move(k);
}
else
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
k->configure(&_gx, &_gy, output_magnitude, output_phase);
_mag_phase = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEHarrisCorners.cpp b/src/runtime/NEON/functions/NEHarrisCorners.cpp
index 23fcf8c805..6b15596f8a 100644
--- a/src/runtime/NEON/functions/NEHarrisCorners.cpp
+++ b/src/runtime/NEON/functions/NEHarrisCorners.cpp
@@ -37,7 +37,6 @@
#include "src/core/NEON/kernels/NEHarrisCornersKernel.h"
#include "src/core/NEON/kernels/NESobel5x5Kernel.h"
#include "src/core/NEON/kernels/NESobel7x7Kernel.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <utility>
@@ -102,21 +101,21 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
{
case 3:
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel3x3>();
+ auto k = std::make_unique<NESobel3x3>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
}
case 5:
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel5x5>();
+ auto k = std::make_unique<NESobel5x5>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
}
case 7:
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel7x7>();
+ auto k = std::make_unique<NESobel7x7>();
k->configure(input, &_gx, &_gy, border_mode, constant_border_value);
_sobel = std::move(k);
break;
@@ -136,21 +135,21 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
{
case 3:
{
- auto k = arm_compute::support::cpp14::make_unique<NEHarrisScoreKernel<3>>();
+ auto k = std::make_unique<NEHarrisScoreKernel<3>>();
k->configure(&_gx, &_gy, &_score, norm_factor, threshold, sensitivity, border_mode == BorderMode::UNDEFINED);
_harris_score = std::move(k);
}
break;
case 5:
{
- auto k = arm_compute::support::cpp14::make_unique<NEHarrisScoreKernel<5>>();
+ auto k = std::make_unique<NEHarrisScoreKernel<5>>();
k->configure(&_gx, &_gy, &_score, norm_factor, threshold, sensitivity, border_mode == BorderMode::UNDEFINED);
_harris_score = std::move(k);
}
break;
case 7:
{
- auto k = arm_compute::support::cpp14::make_unique<NEHarrisScoreKernel<7>>();
+ auto k = std::make_unique<NEHarrisScoreKernel<7>>();
k->configure(&_gx, &_gy, &_score, norm_factor, threshold, sensitivity, border_mode == BorderMode::UNDEFINED);
_harris_score = std::move(k);
}
@@ -159,8 +158,8 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
}
// Configure border filling before harris score
- _border_gx = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
- _border_gy = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _border_gx = std::make_unique<NEFillBorderKernel>();
+ _border_gy = std::make_unique<NEFillBorderKernel>();
_border_gx->configure(&_gx, _harris_score->border_size(), border_mode, constant_border_value);
_border_gy->configure(&_gy, _harris_score->border_size(), border_mode, constant_border_value);
diff --git a/src/runtime/NEON/functions/NEHistogram.cpp b/src/runtime/NEON/functions/NEHistogram.cpp
index 40ea3a16c6..1b093d60e5 100644
--- a/src/runtime/NEON/functions/NEHistogram.cpp
+++ b/src/runtime/NEON/functions/NEHistogram.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEHistogramKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -51,7 +50,7 @@ void NEHistogram::configure(const IImage *input, IDistribution1D *output)
_local_hist.resize(_local_hist_size);
// Configure kernel
- _histogram_kernel = arm_compute::support::cpp14::make_unique<NEHistogramKernel>();
+ _histogram_kernel = std::make_unique<NEHistogramKernel>();
_histogram_kernel->configure(input, output, _local_hist.data(), _window_lut.data());
}
diff --git a/src/runtime/NEON/functions/NEIm2Col.cpp b/src/runtime/NEON/functions/NEIm2Col.cpp
index bc0c60112e..d6d72aa712 100644
--- a/src/runtime/NEON/functions/NEIm2Col.cpp
+++ b/src/runtime/NEON/functions/NEIm2Col.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEIm2ColKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -41,7 +40,7 @@ void NEIm2Col::configure(const ITensor *input, ITensor *output, const Size2D &ke
{
_y_dim = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
- _kernel = arm_compute::support::cpp14::make_unique<NEIm2ColKernel>();
+ _kernel = std::make_unique<NEIm2ColKernel>();
_kernel->configure(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups);
}
diff --git a/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp b/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
index e3fb284796..5965b9722f 100644
--- a/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEInstanceNormalizationLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -46,7 +45,7 @@ void NEInstanceNormalizationLayer::configure(ITensor *input, ITensor *output, fl
// Configure Kernels
_is_nchw = data_layout == DataLayout::NCHW;
- _normalization_kernel = arm_compute::support::cpp14::make_unique<NEInstanceNormalizationLayerKernel>();
+ _normalization_kernel = std::make_unique<NEInstanceNormalizationLayerKernel>();
if(!_is_nchw)
{
diff --git a/src/runtime/NEON/functions/NEIntegralImage.cpp b/src/runtime/NEON/functions/NEIntegralImage.cpp
index 63bcd53373..38f04247f6 100644
--- a/src/runtime/NEON/functions/NEIntegralImage.cpp
+++ b/src/runtime/NEON/functions/NEIntegralImage.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEIntegralImageKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -36,11 +35,11 @@ NEIntegralImage::~NEIntegralImage() = default;
void NEIntegralImage::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEIntegralImageKernel>();
+ auto k = std::make_unique<NEIntegralImageKernel>();
k->configure(input, output);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(output, _kernel->border_size(), BorderMode::CONSTANT, PixelValue());
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
index 4a99968cc3..505ee0a962 100644
--- a/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
+++ b/src/runtime/NEON/functions/NEL2NormalizeLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEL2NormalizeLayerKernel.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -50,7 +49,7 @@ void NEL2NormalizeLayer::configure(ITensor *input, ITensor *output, int axis, fl
// Configure Kernels
const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
_reduce_func.configure(input, &_sumsq, actual_axis, ReductionOperation::SUM_SQUARE);
- _normalize_kernel = arm_compute::support::cpp14::make_unique<NEL2NormalizeLayerKernel>();
+ _normalize_kernel = std::make_unique<NEL2NormalizeLayerKernel>();
_normalize_kernel->configure(input, &_sumsq, output, axis, epsilon);
// Allocate intermediate tensors
diff --git a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
index 131ac82ba8..c1164c3bee 100644
--- a/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NELocallyConnectedLayer.cpp
@@ -30,7 +30,6 @@
#include "src/core/NEON/kernels/NEIm2ColKernel.h"
#include "src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
-#include "support/MemorySupport.h"
#include <cmath>
#include <tuple>
@@ -160,9 +159,9 @@ void NELocallyConnectedLayer::configure(const ITensor *input, const ITensor *wei
// Configure kernels
_input_im2col.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
- _weights_reshape_kernel = arm_compute::support::cpp14::make_unique<NEWeightsReshapeKernel>();
+ _weights_reshape_kernel = std::make_unique<NEWeightsReshapeKernel>();
_weights_reshape_kernel->configure(weights, biases, &_weights_reshaped);
- _mm_kernel = arm_compute::support::cpp14::make_unique<NELocallyConnectedMatrixMultiplyKernel>();
+ _mm_kernel = std::make_unique<NELocallyConnectedMatrixMultiplyKernel>();
_mm_kernel->configure(&_input_im2col_reshaped, &_weights_reshaped, &_gemm_output);
_output_col2im.configure(&_gemm_output, output, Size2D(conv_w, conv_h));
diff --git a/src/runtime/NEON/functions/NELogical.cpp b/src/runtime/NEON/functions/NELogical.cpp
index 8e43d60bef..2c9ebd5f29 100644
--- a/src/runtime/NEON/functions/NELogical.cpp
+++ b/src/runtime/NEON/functions/NELogical.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/NEON/kernels/NELogicalKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -40,7 +39,7 @@ struct NELogicalAnd::Impl : public LogicalArgs
{
};
NELogicalAnd::NELogicalAnd()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NELogicalAnd &NELogicalAnd::operator=(NELogicalAnd &&) = default;
@@ -50,7 +49,7 @@ void NELogicalAnd::configure(const ITensor *input1, const ITensor *input2, ITens
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- _impl->kernel = arm_compute::support::cpp14::make_unique<kernels::NELogicalKernel>();
+ _impl->kernel = std::make_unique<kernels::NELogicalKernel>();
_impl->kernel->configure(input1->info(), input2->info(), output->info(), kernels::LogicalOperation::And);
_impl->pack = ITensorPack();
@@ -73,7 +72,7 @@ struct NELogicalOr::Impl : public LogicalArgs
{
};
NELogicalOr::NELogicalOr()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NELogicalOr &NELogicalOr::operator=(NELogicalOr &&) = default;
@@ -83,7 +82,7 @@ void NELogicalOr::configure(const ITensor *input1, const ITensor *input2, ITenso
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
- _impl->kernel = arm_compute::support::cpp14::make_unique<kernels::NELogicalKernel>();
+ _impl->kernel = std::make_unique<kernels::NELogicalKernel>();
_impl->kernel->configure(input1->info(), input2->info(), output->info(), kernels::LogicalOperation::Or);
_impl->pack = ITensorPack();
@@ -106,7 +105,7 @@ struct NELogicalNot::Impl : public LogicalArgs
{
};
NELogicalNot::NELogicalNot()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NELogicalNot &NELogicalNot::operator=(NELogicalNot &&) = default;
@@ -116,7 +115,7 @@ void NELogicalNot::configure(const ITensor *input, ITensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- _impl->kernel = arm_compute::support::cpp14::make_unique<kernels::NELogicalKernel>();
+ _impl->kernel = std::make_unique<kernels::NELogicalKernel>();
_impl->kernel->configure(input->info(), nullptr, output->info(), kernels::LogicalOperation::Not);
_impl->pack = ITensorPack();
diff --git a/src/runtime/NEON/functions/NEMagnitude.cpp b/src/runtime/NEON/functions/NEMagnitude.cpp
index 06ed8d46c9..34d9a7fb0b 100644
--- a/src/runtime/NEON/functions/NEMagnitude.cpp
+++ b/src/runtime/NEON/functions/NEMagnitude.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Types.h"
#include "src/core/NEON/kernels/NEMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,13 +36,13 @@ void NEMagnitude::configure(const ITensor *input1, const ITensor *input2, ITenso
{
if(mag_type == MagnitudeType::L1NORM)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L1NORM, PhaseType::SIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L1NORM, PhaseType::SIGNED>>();
k->configure(input1, input2, output, nullptr);
_kernel = std::move(k);
}
else
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
k->configure(input1, input2, output, nullptr);
_kernel = std::move(k);
}
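In functions such as NEMagnitude and NEPhase the concrete kernel is a template instantiation selected at run time and stored through a base-class pointer. A hedged sketch of that idiom under C++14 is shown below; every name in it is invented and only the ownership/conversion pattern mirrors the library code.

#include <memory>

struct IExampleKernel                         // stand-in for the kernel interface
{
    virtual ~IExampleKernel() = default;
};

enum class Norm { L1, L2 };

template <Norm N>
struct ExampleMagnitudeKernel : IExampleKernel
{
    void configure() { /* norm-specific setup */ }
};

std::unique_ptr<IExampleKernel> make_kernel(Norm norm)
{
    if(norm == Norm::L1)
    {
        auto k = std::make_unique<ExampleMagnitudeKernel<Norm::L1>>();
        k->configure();
        return k;                             // unique_ptr<Derived> converts to unique_ptr<Base>
    }
    auto k = std::make_unique<ExampleMagnitudeKernel<Norm::L2>>();
    k->configure();
    return k;
}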
diff --git a/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp b/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
index e8c9d09d95..da6260b0c5 100644
--- a/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h"
#include "src/core/NEON/kernels/NEMemsetKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -42,8 +41,8 @@ NEMaxUnpoolingLayer::NEMaxUnpoolingLayer()
void NEMaxUnpoolingLayer::configure(ITensor *input, ITensor *indices, ITensor *output, const PoolingLayerInfo &pool_info)
{
const PixelValue zero_value(0.f);
- _memset_kernel = arm_compute::support::cpp14::make_unique<NEMemsetKernel>();
- _unpooling_layer_kernel = arm_compute::support::cpp14::make_unique<NEMaxUnpoolingLayerKernel>();
+ _memset_kernel = std::make_unique<NEMemsetKernel>();
+ _unpooling_layer_kernel = std::make_unique<NEMaxUnpoolingLayerKernel>();
_memset_kernel->configure(output, zero_value);
_unpooling_layer_kernel->configure(input, indices, output, pool_info);
}
diff --git a/src/runtime/NEON/functions/NEMeanStdDev.cpp b/src/runtime/NEON/functions/NEMeanStdDev.cpp
index e073420114..6e2d7fc81d 100644
--- a/src/runtime/NEON/functions/NEMeanStdDev.cpp
+++ b/src/runtime/NEON/functions/NEMeanStdDev.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEMeanStdDevKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -39,8 +38,8 @@ NEMeanStdDev::NEMeanStdDev()
void NEMeanStdDev::configure(IImage *input, float *mean, float *stddev)
{
- _mean_stddev_kernel = arm_compute::support::cpp14::make_unique<NEMeanStdDevKernel>();
- _fill_border_kernel = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _mean_stddev_kernel = std::make_unique<NEMeanStdDevKernel>();
+ _fill_border_kernel = std::make_unique<NEFillBorderKernel>();
_mean_stddev_kernel->configure(input, mean, &_global_sum, stddev, &_global_sum_squared);
_fill_border_kernel->configure(input, _mean_stddev_kernel->border_size(), BorderMode::CONSTANT, PixelValue(static_cast<uint8_t>(0)));
diff --git a/src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp b/src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp
index d128c4456a..02de983b77 100644
--- a/src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.h"
#include "src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -32,7 +31,7 @@ NEMeanStdDevNormalizationLayer::~NEMeanStdDevNormalizationLayer() = default;
void NEMeanStdDevNormalizationLayer::configure(ITensor *input, ITensor *output, float epsilon)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMeanStdDevNormalizationKernel>();
+ auto k = std::make_unique<NEMeanStdDevNormalizationKernel>();
k->configure(input, output, epsilon);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEMedian3x3.cpp b/src/runtime/NEON/functions/NEMedian3x3.cpp
index b7b7c2cb47..4d117783ed 100644
--- a/src/runtime/NEON/functions/NEMedian3x3.cpp
+++ b/src/runtime/NEON/functions/NEMedian3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEMedian3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ namespace arm_compute
{
void NEMedian3x3::configure(ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMedian3x3Kernel>();
+ auto k = std::make_unique<NEMedian3x3Kernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEMinMaxLocation.cpp b/src/runtime/NEON/functions/NEMinMaxLocation.cpp
index 3c2219ca07..ffbc33bc2e 100644
--- a/src/runtime/NEON/functions/NEMinMaxLocation.cpp
+++ b/src/runtime/NEON/functions/NEMinMaxLocation.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEMinMaxLocationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,10 +37,10 @@ NEMinMaxLocation::NEMinMaxLocation()
void NEMinMaxLocation::configure(const IImage *input, void *min, void *max, ICoordinates2DArray *min_loc, ICoordinates2DArray *max_loc, uint32_t *min_count, uint32_t *max_count)
{
- _min_max = arm_compute::support::cpp14::make_unique<NEMinMaxKernel>();
+ _min_max = std::make_unique<NEMinMaxKernel>();
_min_max->configure(input, min, max);
- _min_max_loc = arm_compute::support::cpp14::make_unique<NEMinMaxLocationKernel>();
+ _min_max_loc = std::make_unique<NEMinMaxLocationKernel>();
_min_max_loc->configure(input, min, max, min_loc, max_loc, min_count, max_count);
}
diff --git a/src/runtime/NEON/functions/NENonLinearFilter.cpp b/src/runtime/NEON/functions/NENonLinearFilter.cpp
index 4d8fd00cbd..f3acabfa6d 100644
--- a/src/runtime/NEON/functions/NENonLinearFilter.cpp
+++ b/src/runtime/NEON/functions/NENonLinearFilter.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NENonLinearFilterKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -36,11 +35,11 @@ void NENonLinearFilter::configure(ITensor *input, ITensor *output, NonLinearFilt
BorderMode border_mode,
uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NENonLinearFilterKernel>();
+ auto k = std::make_unique<NENonLinearFilterKernel>();
k->configure(input, output, function, mask_size, pattern, mask, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NENonMaximaSuppression3x3.cpp b/src/runtime/NEON/functions/NENonMaximaSuppression3x3.cpp
index b8f5c251b7..a34be71ea0 100644
--- a/src/runtime/NEON/functions/NENonMaximaSuppression3x3.cpp
+++ b/src/runtime/NEON/functions/NENonMaximaSuppression3x3.cpp
@@ -25,7 +25,6 @@
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -33,11 +32,11 @@ namespace arm_compute
{
void NENonMaximaSuppression3x3::configure(ITensor *input, ITensor *output, BorderMode border_mode)
{
- auto k = arm_compute::support::cpp14::make_unique<NENonMaximaSuppression3x3Kernel>();
+ auto k = std::make_unique<NENonMaximaSuppression3x3Kernel>();
k->configure(input, output, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
if(border_mode != BorderMode::UNDEFINED)
{
b->configure(input, BorderSize(1), BorderMode::CONSTANT, static_cast<float>(0.f));
diff --git a/src/runtime/NEON/functions/NENormalizationLayer.cpp b/src/runtime/NEON/functions/NENormalizationLayer.cpp
index dfc73b2a57..9dcb157c03 100644
--- a/src/runtime/NEON/functions/NENormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NENormalizationLayer.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NENormalizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -52,7 +51,7 @@ void NENormalizationLayer::configure(const ITensor *input, ITensor *output, cons
_memory_group.manage(&_input_squared);
// Configure kernels
- _norm_kernel = arm_compute::support::cpp14::make_unique<NENormalizationLayerKernel>();
+ _norm_kernel = std::make_unique<NENormalizationLayerKernel>();
_norm_kernel->configure(input, &_input_squared, output, norm_info);
_multiply_f.configure(input, input, &_input_squared, 1.0f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
diff --git a/src/runtime/NEON/functions/NEOpticalFlow.cpp b/src/runtime/NEON/functions/NEOpticalFlow.cpp
index 565346bfce..a868208aaf 100644
--- a/src/runtime/NEON/functions/NEOpticalFlow.cpp
+++ b/src/runtime/NEON/functions/NEOpticalFlow.cpp
@@ -34,7 +34,6 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NELKTrackerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -114,7 +113,7 @@ void NEOpticalFlow::configure(const Pyramid *old_pyramid, const Pyramid *new_pyr
_func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
// Init Lucas-Kanade kernel
- _kernel_tracker[i] = arm_compute::support::cpp14::make_unique<NELKTrackerKernel>();
+ _kernel_tracker[i] = std::make_unique<NELKTrackerKernel>();
_kernel_tracker[i]->configure(old_ith_input, new_ith_input, &_scharr_gx[i], &_scharr_gy[i],
old_points, new_points_estimates, new_points,
&_old_points_internal, &_new_points_internal,
diff --git a/src/runtime/NEON/functions/NEPReluLayer.cpp b/src/runtime/NEON/functions/NEPReluLayer.cpp
index 00a1a4257a..fe656c0be0 100644
--- a/src/runtime/NEON/functions/NEPReluLayer.cpp
+++ b/src/runtime/NEON/functions/NEPReluLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/ITensor.h"
#include "src/core/NEON/kernels/NEElementwiseOperationKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -33,7 +32,7 @@ namespace experimental
{
void NEPRelu::configure(const ITensorInfo *input, const ITensorInfo *alpha, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
+ auto k = std::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::PRELU, input, alpha, output);
_kernel = std::move(k);
}
@@ -53,7 +52,7 @@ struct NEPReluLayer::Impl
};
NEPReluLayer::NEPReluLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEPReluLayer::NEPReluLayer(NEPReluLayer &&) = default;
@@ -65,7 +64,7 @@ void NEPReluLayer::configure(const ITensor *input, const ITensor *alpha, ITensor
_impl->src_0 = input;
_impl->src_1 = alpha;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEPRelu>();
+ _impl->op = std::make_unique<experimental::NEPRelu>();
_impl->op->configure(input->info(), alpha->info(), output->info());
}
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 92659f39a2..88a73b8b0d 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -30,7 +30,6 @@
#include "src/core/NEON/kernels/NECopyKernel.h"
#include "src/core/NEON/kernels/NEPadLayerKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -59,7 +58,7 @@ NEPadLayer::NEPadLayer()
void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
- _pad_kernel = arm_compute::support::cpp14::make_unique<NEPadLayerKernel>();
+ _pad_kernel = std::make_unique<NEPadLayerKernel>();
_pad_kernel->configure(input, output, padding, constant_value, PaddingMode::CONSTANT);
}
@@ -201,7 +200,7 @@ void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &p
else
{
// Copy the input to the whole output if no padding is applied
- _copy_kernel = arm_compute::support::cpp14::make_unique<NECopyKernel>();
+ _copy_kernel = std::make_unique<NECopyKernel>();
_copy_kernel->configure(input, output);
}
}
diff --git a/src/runtime/NEON/functions/NEPermute.cpp b/src/runtime/NEON/functions/NEPermute.cpp
index d2a115fdc8..cceb22f8c6 100644
--- a/src/runtime/NEON/functions/NEPermute.cpp
+++ b/src/runtime/NEON/functions/NEPermute.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "src/core/NEON/kernels/NEPermuteKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEPermute::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
{
- auto k = arm_compute::support::cpp14::make_unique<NEPermuteKernel>();
+ auto k = std::make_unique<NEPermuteKernel>();
k->configure(input, output, perm);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEPhase.cpp b/src/runtime/NEON/functions/NEPhase.cpp
index 3b6182a269..3b69a10e7f 100644
--- a/src/runtime/NEON/functions/NEPhase.cpp
+++ b/src/runtime/NEON/functions/NEPhase.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEPhase.h"
#include "src/core/NEON/kernels/NEMagnitudePhaseKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,13 +33,13 @@ void NEPhase::configure(const ITensor *input1, const ITensor *input2, ITensor *o
{
if(phase_type == PhaseType::UNSIGNED)
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::UNSIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::UNSIGNED>>();
k->configure(input1, input2, nullptr, output);
_kernel = std::move(k);
}
else
{
- auto k = arm_compute::support::cpp14::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
+ auto k = std::make_unique<NEMagnitudePhaseKernel<MagnitudeType::L2NORM, PhaseType::SIGNED>>();
k->configure(input1, input2, nullptr, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
index f7f4437554..179bcdaf3e 100644
--- a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
+++ b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/ITensor.h"
#include "src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void NEPixelWiseMultiplication::configure(ITensorInfo *input1, ITensorInfo *inpu
const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<NEPixelWiseMultiplicationKernel>();
+ auto k = std::make_unique<NEPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output, scale, overflow_policy, rounding_policy);
_kernel = std::move(k);
}
@@ -51,7 +50,7 @@ Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITen
void NEComplexPixelWiseMultiplication::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
- auto k = arm_compute::support::cpp14::make_unique<NEComplexPixelWiseMultiplicationKernel>();
+ auto k = std::make_unique<NEComplexPixelWiseMultiplicationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
@@ -72,7 +71,7 @@ struct NEPixelWiseMultiplication::Impl
};
NEPixelWiseMultiplication::NEPixelWiseMultiplication()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEPixelWiseMultiplication::NEPixelWiseMultiplication(NEPixelWiseMultiplication &&) = default;
@@ -91,7 +90,7 @@ void NEPixelWiseMultiplication::configure(const ITensor *input1, const ITensor *
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEPixelWiseMultiplication>();
+ _impl->op = std::make_unique<experimental::NEPixelWiseMultiplication>();
_impl->op->configure(input1->info(), input2->info(), output->info(), scale, overflow_policy, rounding_policy, act_info);
}
@@ -113,7 +112,7 @@ struct NEComplexPixelWiseMultiplication::Impl
};
NEComplexPixelWiseMultiplication::NEComplexPixelWiseMultiplication()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEComplexPixelWiseMultiplication::NEComplexPixelWiseMultiplication(NEComplexPixelWiseMultiplication &&) = default;
@@ -130,7 +129,7 @@ void NEComplexPixelWiseMultiplication::configure(ITensor *input1, ITensor *input
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEComplexPixelWiseMultiplication>();
+ _impl->op = std::make_unique<experimental::NEComplexPixelWiseMultiplication>();
_impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
}
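The run-time wrappers touched here (NEPixelWiseMultiplication, NELogicalAnd/Or/Not, NEReshapeLayer and similar) keep their state behind a pimpl, and the change simply swaps the shim used to allocate it. A small sketch of that layout, again with invented names and under the assumption that only the ownership pattern matters:

#include <memory>

// Invented stand-ins; only the pimpl ownership pattern mirrors the library code.
struct ExampleImpl
{
    const float *src = nullptr;
    float       *dst = nullptr;
};

class ExampleFunction
{
public:
    ExampleFunction() : _impl(std::make_unique<ExampleImpl>()) {} // was support::cpp14::make_unique<Impl>()
    ExampleFunction(ExampleFunction &&)            = default;
    ExampleFunction &operator=(ExampleFunction &&) = default;

    void configure(const float *src, float *dst)
    {
        _impl->src = src;
        _impl->dst = dst;
    }

private:
    std::unique_ptr<ExampleImpl> _impl; // sole owner of the hidden state
};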
diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp
index 12ac8d6d7d..887f00de24 100644
--- a/src/runtime/NEON/functions/NEPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEPoolingLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -47,7 +46,7 @@ void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLay
_data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->info()->data_layout() : pool_info.data_layout;
// Configure pooling kernel
- _pooling_layer_kernel = arm_compute::support::cpp14::make_unique<NEPoolingLayerKernel>();
+ _pooling_layer_kernel = std::make_unique<NEPoolingLayerKernel>();
_pooling_layer_kernel->configure(input, output, pool_info, indices);
switch(_data_layout)
@@ -61,7 +60,7 @@ void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLay
{
zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
}
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
_border_handler->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
break;
}
diff --git a/src/runtime/NEON/functions/NEPriorBoxLayer.cpp b/src/runtime/NEON/functions/NEPriorBoxLayer.cpp
index bfa06da04e..0c71706586 100644
--- a/src/runtime/NEON/functions/NEPriorBoxLayer.cpp
+++ b/src/runtime/NEON/functions/NEPriorBoxLayer.cpp
@@ -32,13 +32,11 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEPriorBoxLayerKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
void NEPriorBoxLayer::configure(const ITensor *input1, const ITensor *input2, ITensor *output, const PriorBoxLayerInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<NEPriorBoxLayerKernel>();
+ auto k = std::make_unique<NEPriorBoxLayerKernel>();
k->configure(input1, input2, output, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEQLSTMLayer.cpp b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
index 1013730235..85d62ac058 100644
--- a/src/runtime/NEON/functions/NEQLSTMLayer.cpp
+++ b/src/runtime/NEON/functions/NEQLSTMLayer.cpp
@@ -39,7 +39,6 @@
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -75,7 +74,7 @@ void NEQLSTMLayer::configure_layer_norm(NEQLSTMLayer::LayerNormGate g, const ITe
_memory_group.manage(&out);
out.allocator()->init(*(in->info()));
- get_layer_norm(g) = arm_compute::support::cpp14::make_unique<NEQLSTMLayerNormalizationKernel>();
+ get_layer_norm(g) = std::make_unique<NEQLSTMLayerNormalizationKernel>();
get_layer_norm(g)->configure(in, &out, get_layer_norm_weight(g), get_layer_norm_bias(g));
}
@@ -226,18 +225,18 @@ void NEQLSTMLayer::configure(const ITensor *input,
_input_to_input_weights = lstm_params.input_to_input_weights();
_recurrent_to_input_weights = lstm_params.recurrent_to_input_weights();
- _input_to_input_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _recurrent_to_input_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _input_to_input_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _recurrent_to_input_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
_input_to_input_reduction->configure(_input_to_input_weights, &_input_to_input_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
_recurrent_to_input_reduction->configure(_recurrent_to_input_weights, &_recurrent_to_input_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
}
- _input_to_forget_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _recurrent_to_forget_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _input_to_cell_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _recurrent_to_cell_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _input_to_output_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
- _recurrent_to_output_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _input_to_forget_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _recurrent_to_forget_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _input_to_cell_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _recurrent_to_cell_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _input_to_output_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _recurrent_to_output_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
_recurrent_to_cell_reduction->configure(input_to_forget_weights, &_input_to_forget_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qinput.offset, true));
_recurrent_to_forget_reduction->configure(recurrent_to_forget_weights, &_recurrent_to_forget_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
@@ -247,7 +246,7 @@ void NEQLSTMLayer::configure(const ITensor *input,
_recurrent_to_output_reduction->configure(recurrent_to_output_weights, &_recurrent_to_output_eff_bias, GEMMLowpReductionKernelInfo(num_units, false, -qoutput_state_in.offset, true));
if(_has_projection)
{
- _projection_reduction = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixAReductionKernel>();
+ _projection_reduction = std::make_unique<NEGEMMLowpMatrixAReductionKernel>();
_projection_reduction->configure(_projection_weights, &_projection_eff_bias, GEMMLowpReductionKernelInfo(output_size, false, lstm_params.hidden_state_zero(), true));
if(_projection_bias != nullptr)
{
diff --git a/src/runtime/NEON/functions/NEQuantizationLayer.cpp b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
index a20ffb8858..42eb12d05d 100644
--- a/src/runtime/NEON/functions/NEQuantizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEQuantizationLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEQuantizationLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -44,7 +43,7 @@ void NEQuantizationLayer::configure(const ITensor *input, ITensor *output)
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
// Configure quantize kernel
- auto k = arm_compute::support::cpp14::make_unique<NEQuantizationLayerKernel>();
+ auto k = std::make_unique<NEQuantizationLayerKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index a8e10482a7..c16d09f60c 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -42,7 +42,6 @@
#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -114,7 +113,7 @@ void NERNNLayer::configure(const ITensor *input, const ITensor *weights, const I
_activation.configure(&_add_output, hidden_state, info);
_add_output.allocator()->allocate();
- _copy_kernel = arm_compute::support::cpp14::make_unique<NECopyKernel>();
+ _copy_kernel = std::make_unique<NECopyKernel>();
_copy_kernel->configure(hidden_state, output);
}
diff --git a/src/runtime/NEON/functions/NEROIAlignLayer.cpp b/src/runtime/NEON/functions/NEROIAlignLayer.cpp
index a046140551..a946358e18 100644
--- a/src/runtime/NEON/functions/NEROIAlignLayer.cpp
+++ b/src/runtime/NEON/functions/NEROIAlignLayer.cpp
@@ -25,7 +25,6 @@
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEROIAlignLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -39,7 +38,7 @@ Status NEROIAlignLayer::validate(const ITensorInfo *input, const ITensorInfo *ro
void NEROIAlignLayer::configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info)
{
// Configure ROI pooling kernel
- auto k = arm_compute::support::cpp14::make_unique<NEROIAlignLayerKernel>();
+ auto k = std::make_unique<NEROIAlignLayerKernel>();
k->configure(input, rois, output, pool_info);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEROIPoolingLayer.cpp b/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
index 8bcf152881..7ca6ecc737 100644
--- a/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEROIPoolingLayer.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEROIPoolingLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -39,7 +38,7 @@ NEROIPoolingLayer::NEROIPoolingLayer()
void NEROIPoolingLayer::configure(const ITensor *input, const ITensor *rois, ITensor *output, const ROIPoolingLayerInfo &pool_info)
{
- _roi_kernel = arm_compute::support::cpp14::make_unique<NEROIPoolingLayerKernel>();
+ _roi_kernel = std::make_unique<NEROIPoolingLayerKernel>();
_roi_kernel->configure(input, rois, output, pool_info);
}
diff --git a/src/runtime/NEON/functions/NERange.cpp b/src/runtime/NEON/functions/NERange.cpp
index ba166b2d58..56ef2bf657 100644
--- a/src/runtime/NEON/functions/NERange.cpp
+++ b/src/runtime/NEON/functions/NERange.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NERangeKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -38,7 +37,7 @@ NERange::NERange()
void NERange::configure(ITensor *output, const float start, const float end, const float step)
{
- _kernel = arm_compute::support::cpp14::make_unique<NERangeKernel>();
+ _kernel = std::make_unique<NERangeKernel>();
_kernel->configure(output, start, end, step);
}
diff --git a/src/runtime/NEON/functions/NEReductionOperation.cpp b/src/runtime/NEON/functions/NEReductionOperation.cpp
index 463b65ec28..5d6f520a52 100644
--- a/src/runtime/NEON/functions/NEReductionOperation.cpp
+++ b/src/runtime/NEON/functions/NEReductionOperation.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -129,7 +128,7 @@ void NEReductionOperation::configure(ITensor *input, ITensor *output, unsigned i
ARM_COMPUTE_ERROR_THROW_ON(NEReductionOperation::validate(input->info(), output->info(), axis, op, keep_dims));
// Configure reduction kernel
- _reduction_kernel = arm_compute::support::cpp14::make_unique<NEReductionOperationKernel>();
+ _reduction_kernel = std::make_unique<NEReductionOperationKernel>();
_reduction_kernel->configure(input, output_internal, axis, op);
_window_split = reduction_window_split_dimension(axis);
_reduction_axis = axis;
diff --git a/src/runtime/NEON/functions/NERemap.cpp b/src/runtime/NEON/functions/NERemap.cpp
index 9276d49cf5..f2f57aa599 100644
--- a/src/runtime/NEON/functions/NERemap.cpp
+++ b/src/runtime/NEON/functions/NERemap.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NERemapKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -45,11 +44,11 @@ void NERemap::configure(ITensor *input, const ITensor *map_x, const ITensor *map
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(map_y, 1, DataType::F32);
ARM_COMPUTE_ERROR_ON_MSG(policy == InterpolationPolicy::AREA, "Area interpolation is not supported");
- auto k = arm_compute::support::cpp14::make_unique<NERemapKernel>();
+ auto k = std::make_unique<NERemapKernel>();
k->configure(input, map_x, map_y, output, policy);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEReorgLayer.cpp b/src/runtime/NEON/functions/NEReorgLayer.cpp
index 77ec7fbfb1..23ca3a4eea 100644
--- a/src/runtime/NEON/functions/NEReorgLayer.cpp
+++ b/src/runtime/NEON/functions/NEReorgLayer.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEReorgLayer.h"
#include "src/core/NEON/kernels/NEReorgLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEReorgLayer::configure(const ITensor *input, ITensor *output, int32_t stride)
{
- auto k = arm_compute::support::cpp14::make_unique<NEReorgLayerKernel>();
+ auto k = std::make_unique<NEReorgLayerKernel>();
k->configure(input, output, stride);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEReshapeLayer.cpp b/src/runtime/NEON/functions/NEReshapeLayer.cpp
index 915d5d408f..9ad6a35cc3 100644
--- a/src/runtime/NEON/functions/NEReshapeLayer.cpp
+++ b/src/runtime/NEON/functions/NEReshapeLayer.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Types.h"
#include "src/core/NEON/kernels/NEReshapeLayerKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -39,7 +38,7 @@ NEReshape::~NEReshape() = default;
void NEReshape::configure(const ITensorInfo *input, ITensorInfo *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NEReshapeLayerKernel>();
+ auto k = std::make_unique<NEReshapeLayerKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
@@ -58,7 +57,7 @@ struct NEReshapeLayer::Impl
};
NEReshapeLayer::NEReshapeLayer()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
@@ -72,7 +71,7 @@ void NEReshapeLayer::configure(const ITensor *input, ITensor *output)
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEReshape>();
+ _impl->op = std::make_unique<experimental::NEReshape>();
_impl->op->configure(input->info(), output->info());
}
diff --git a/src/runtime/NEON/functions/NEReverse.cpp b/src/runtime/NEON/functions/NEReverse.cpp
index 3ed0688386..36127ef83c 100644
--- a/src/runtime/NEON/functions/NEReverse.cpp
+++ b/src/runtime/NEON/functions/NEReverse.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEReverse.h"
#include "src/core/NEON/kernels/NEReverseKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEReverse::configure(const ITensor *input, ITensor *output, const ITensor *axis)
{
- auto k = arm_compute::support::cpp14::make_unique<NEReverseKernel>();
+ auto k = std::make_unique<NEReverseKernel>();
k->configure(input, output, axis);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEScale.cpp b/src/runtime/NEON/functions/NEScale.cpp
index 0290fe5a01..9d6e2ca754 100644
--- a/src/runtime/NEON/functions/NEScale.cpp
+++ b/src/runtime/NEON/functions/NEScale.cpp
@@ -36,7 +36,6 @@
#include "src/core/utils/ScaleUtils.h"
-#include "support/MemorySupport.h"
#include "support/Rounding.h"
#include <cmath>
@@ -125,7 +124,7 @@ void NEScale::configure(ITensor *input, ITensor *output, const ScaleKernelInfo &
// Area interpolation behaves as Nearest Neighbour in case of up-sampling
const auto policy_to_use = (info.interpolation_policy == InterpolationPolicy::AREA && wr <= 1.f && hr <= 1.f) ? InterpolationPolicy::NEAREST_NEIGHBOR : info.interpolation_policy;
- auto scale_kernel = arm_compute::support::cpp14::make_unique<NEScaleKernel>();
+ auto scale_kernel = std::make_unique<NEScaleKernel>();
switch(policy_to_use)
{
case InterpolationPolicy::NEAREST_NEIGHBOR:
diff --git a/src/runtime/NEON/functions/NEScharr3x3.cpp b/src/runtime/NEON/functions/NEScharr3x3.cpp
index cea0eefdb0..414e9470ea 100644
--- a/src/runtime/NEON/functions/NEScharr3x3.cpp
+++ b/src/runtime/NEON/functions/NEScharr3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEScharr3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ using namespace arm_compute;
void NEScharr3x3::configure(ITensor *input, ITensor *output_x, ITensor *output_y, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NEScharr3x3Kernel>();
+ auto k = std::make_unique<NEScharr3x3Kernel>();
k->configure(input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NESelect.cpp b/src/runtime/NEON/functions/NESelect.cpp
index 0d1f490767..f8ba9f03ed 100644
--- a/src/runtime/NEON/functions/NESelect.cpp
+++ b/src/runtime/NEON/functions/NESelect.cpp
@@ -25,13 +25,12 @@
#include "arm_compute/core/Types.h"
#include "src/core/NEON/kernels/NESelectKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NESelect::configure(const ITensor *c, const ITensor *x, const ITensor *y, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NESelectKernel>();
+ auto k = std::make_unique<NESelectKernel>();
k->configure(c, x, y, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NESlice.cpp b/src/runtime/NEON/functions/NESlice.cpp
index dd56eaba8b..9b08bca38a 100644
--- a/src/runtime/NEON/functions/NESlice.cpp
+++ b/src/runtime/NEON/functions/NESlice.cpp
@@ -29,8 +29,6 @@
#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "src/core/NEON/kernels/NEStridedSliceKernel.h"
-#include "support/MemorySupport.h"
-
namespace arm_compute
{
namespace experimental
@@ -42,7 +40,7 @@ void NESlice::configure(const ITensorInfo *input, ITensorInfo *output, const Coo
// Get absolute end coordinates
const int32_t slice_end_mask = arm_compute::helpers::tensor_transform::construct_slice_end_mask(ends);
- auto k = arm_compute::support::cpp14::make_unique<NEStridedSliceKernel>();
+ auto k = std::make_unique<NEStridedSliceKernel>();
k->configure(input, output, starts, ends, BiStrides(), 0, slice_end_mask, 0);
_kernel = std::move(k);
}
@@ -72,7 +70,7 @@ struct NESlice::Impl
};
NESlice::NESlice()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NESlice::NESlice(NESlice &&) = default;
@@ -88,7 +86,7 @@ void NESlice::configure(const ITensor *input, ITensor *output, const Coordinates
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NESlice>();
+ _impl->op = std::make_unique<experimental::NESlice>();
_impl->op->configure(input->info(), output->info(), starts, ends);
}
diff --git a/src/runtime/NEON/functions/NESobel3x3.cpp b/src/runtime/NEON/functions/NESobel3x3.cpp
index 38d2dc227e..1a57bc3fc6 100644
--- a/src/runtime/NEON/functions/NESobel3x3.cpp
+++ b/src/runtime/NEON/functions/NESobel3x3.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/PixelValue.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NESobel3x3Kernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -34,11 +33,11 @@ namespace arm_compute
{
void NESobel3x3::configure(ITensor *input, ITensor *output_x, ITensor *output_y, BorderMode border_mode, uint8_t constant_border_value)
{
- auto k = arm_compute::support::cpp14::make_unique<NESobel3x3Kernel>();
+ auto k = std::make_unique<NESobel3x3Kernel>();
k->configure(input, output_x, output_y, border_mode == BorderMode::UNDEFINED);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, PixelValue(constant_border_value));
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NESobel5x5.cpp b/src/runtime/NEON/functions/NESobel5x5.cpp
index e631fb3ed7..e587981fa9 100644
--- a/src/runtime/NEON/functions/NESobel5x5.cpp
+++ b/src/runtime/NEON/functions/NESobel5x5.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NESobel5x5Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -51,9 +50,9 @@ void NESobel5x5::configure(ITensor *input, ITensor *output_x, ITensor *output_y,
TensorInfo tensor_info(input->info()->tensor_shape(), Format::S16);
- _sobel_hor = arm_compute::support::cpp14::make_unique<NESobel5x5HorKernel>();
- _sobel_vert = arm_compute::support::cpp14::make_unique<NESobel5x5VertKernel>();
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _sobel_hor = std::make_unique<NESobel5x5HorKernel>();
+ _sobel_vert = std::make_unique<NESobel5x5VertKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
if(run_sobel_x && run_sobel_y)
{
diff --git a/src/runtime/NEON/functions/NESobel7x7.cpp b/src/runtime/NEON/functions/NESobel7x7.cpp
index bc5f87c1ec..7b1a975951 100644
--- a/src/runtime/NEON/functions/NESobel7x7.cpp
+++ b/src/runtime/NEON/functions/NESobel7x7.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NESobel7x7Kernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -50,9 +49,9 @@ void NESobel7x7::configure(ITensor *input, ITensor *output_x, ITensor *output_y,
const bool run_sobel_y = output_y != nullptr;
TensorInfo tensor_info(input->info()->tensor_shape(), Format::S32);
- _sobel_hor = arm_compute::support::cpp14::make_unique<NESobel7x7HorKernel>();
- _sobel_vert = arm_compute::support::cpp14::make_unique<NESobel7x7VertKernel>();
- _border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _sobel_hor = std::make_unique<NESobel7x7HorKernel>();
+ _sobel_vert = std::make_unique<NESobel7x7VertKernel>();
+ _border_handler = std::make_unique<NEFillBorderKernel>();
if(run_sobel_x && run_sobel_y)
{
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index e79ab0ee2d..6be34ad1a4 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -30,7 +30,6 @@
#include "src/core/NEON/kernels/NESoftmaxLayerKernel.h"
#include "src/core/NEON/kernels/NESoftmaxLayerKernel.h"
#include "src/core/helpers/SoftmaxHelpers.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -83,8 +82,8 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, f
_memory_group.manage(&_tmp);
// Configure kernels
- _max_kernel = arm_compute::support::cpp14::make_unique<NELogits1DMaxKernel>();
- _softmax_kernel = arm_compute::support::cpp14::make_unique<NELogits1DSoftmaxKernel<IS_LOG>>();
+ _max_kernel = std::make_unique<NELogits1DMaxKernel>();
+ _softmax_kernel = std::make_unique<NELogits1DSoftmaxKernel<IS_LOG>>();
_max_kernel->configure(tmp_input, &_max);
if(_needs_permute)
{
@@ -104,7 +103,7 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, f
else
{
// Softmax 2D case
- _fill_border_kernel = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ _fill_border_kernel = std::make_unique<NEFillBorderKernel>();
_fill_border_kernel->configure(tmp_input, _max_kernel->border_size(), BorderMode::REPLICATE);
_softmax_kernel->configure(tmp_input, &_max, output, beta, &_tmp);
}
diff --git a/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
index 516e8d604c..10b384157d 100644
--- a/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
+++ b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEMemsetKernel.h"
#include "src/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -49,10 +48,10 @@ void NESpaceToBatchLayer::configure(const ITensor *input, const ITensor *block_s
if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
{
_has_padding = true;
- _memset_kernel = arm_compute::support::cpp14::make_unique<NEMemsetKernel>();
+ _memset_kernel = std::make_unique<NEMemsetKernel>();
_memset_kernel->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
}
- _space_to_batch_kernel = arm_compute::support::cpp14::make_unique<NESpaceToBatchLayerKernel>();
+ _space_to_batch_kernel = std::make_unique<NESpaceToBatchLayerKernel>();
_space_to_batch_kernel->configure(input, block_shape, paddings, output);
}
@@ -63,10 +62,10 @@ void NESpaceToBatchLayer::configure(const ITensor *input, const int block_shape_
if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
{
_has_padding = true;
- _memset_kernel = arm_compute::support::cpp14::make_unique<NEMemsetKernel>();
+ _memset_kernel = std::make_unique<NEMemsetKernel>();
_memset_kernel->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
}
- _space_to_batch_kernel = arm_compute::support::cpp14::make_unique<NESpaceToBatchLayerKernel>();
+ _space_to_batch_kernel = std::make_unique<NESpaceToBatchLayerKernel>();
_space_to_batch_kernel->configure(input, block_shape_x, block_shape_y, padding_left, padding_right, output);
}
diff --git a/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp b/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp
index a834600199..1e3776c448 100644
--- a/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp
+++ b/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NESpaceToDepthLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -44,7 +43,7 @@ NESpaceToDepthLayer::NESpaceToDepthLayer()
void NESpaceToDepthLayer::configure(const ITensor *input, ITensor *output, int32_t block_shape)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- _space_to_depth_kernel = arm_compute::support::cpp14::make_unique<NESpaceToDepthLayerKernel>();
+ _space_to_depth_kernel = std::make_unique<NESpaceToDepthLayerKernel>();
_space_to_depth_kernel->configure(input, output, block_shape);
}
diff --git a/src/runtime/NEON/functions/NEStackLayer.cpp b/src/runtime/NEON/functions/NEStackLayer.cpp
index e38ff6bee7..af5c80d036 100644
--- a/src/runtime/NEON/functions/NEStackLayer.cpp
+++ b/src/runtime/NEON/functions/NEStackLayer.cpp
@@ -31,7 +31,6 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEStackLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -54,7 +53,7 @@ void NEStackLayer::configure(const std::vector<ITensor *> &input, int axis, ITen
for(unsigned int i = 0; i < _num_inputs; i++)
{
- _stack_kernels[i] = arm_compute::support::cpp14::make_unique<NEStackLayerKernel>();
+ _stack_kernels[i] = std::make_unique<NEStackLayerKernel>();
_stack_kernels[i]->configure(input[i], axis_u, i, _num_inputs, output);
}
}
diff --git a/src/runtime/NEON/functions/NEStridedSlice.cpp b/src/runtime/NEON/functions/NEStridedSlice.cpp
index 308b856ec6..fffb38c3ca 100644
--- a/src/runtime/NEON/functions/NEStridedSlice.cpp
+++ b/src/runtime/NEON/functions/NEStridedSlice.cpp
@@ -26,7 +26,6 @@
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "src/core/NEON/kernels/NEStridedSliceKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -36,7 +35,7 @@ void NEStridedSlice::configure(const ITensorInfo *input, ITensorInfo *output,
const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
- auto k = arm_compute::support::cpp14::make_unique<NEStridedSliceKernel>();
+ auto k = std::make_unique<NEStridedSliceKernel>();
k->configure(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
_kernel = std::move(k);
}
@@ -57,7 +56,7 @@ struct NEStridedSlice::Impl
};
NEStridedSlice::NEStridedSlice()
- : _impl(support::cpp14::make_unique<Impl>())
+ : _impl(std::make_unique<Impl>())
{
}
NEStridedSlice::NEStridedSlice(NEStridedSlice &&) = default;
@@ -70,7 +69,7 @@ void NEStridedSlice::configure(const ITensor *input, ITensor *output,
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEStridedSlice>();
+ _impl->op = std::make_unique<experimental::NEStridedSlice>();
_impl->op->configure(input->info(), output->info(), starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}
diff --git a/src/runtime/NEON/functions/NETableLookup.cpp b/src/runtime/NEON/functions/NETableLookup.cpp
index 9295bf0ece..fde3908c81 100644
--- a/src/runtime/NEON/functions/NETableLookup.cpp
+++ b/src/runtime/NEON/functions/NETableLookup.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NETableLookup.h"
#include "src/core/NEON/kernels/NETableLookupKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ using namespace arm_compute;
void NETableLookup::configure(const ITensor *input, const ILut *lut, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NETableLookupKernel>();
+ auto k = std::make_unique<NETableLookupKernel>();
k->configure(input, lut, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEThreshold.cpp b/src/runtime/NEON/functions/NEThreshold.cpp
index 2f1e3047b5..4d382d6fab 100644
--- a/src/runtime/NEON/functions/NEThreshold.cpp
+++ b/src/runtime/NEON/functions/NEThreshold.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEThreshold.h"
#include "src/core/NEON/kernels/NEThresholdKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -37,7 +36,7 @@ void NEThreshold::configure(const ITensor *input, ITensor *output, uint8_t thres
void NEThreshold::configure(const ITensor *input, ITensor *output, const ThresholdKernelInfo &info)
{
- auto k = arm_compute::support::cpp14::make_unique<NEThresholdKernel>();
+ auto k = std::make_unique<NEThresholdKernel>();
k->configure(input, output, info);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NETile.cpp b/src/runtime/NEON/functions/NETile.cpp
index 6a1e20ddf8..088816eb95 100644
--- a/src/runtime/NEON/functions/NETile.cpp
+++ b/src/runtime/NEON/functions/NETile.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NETile.h"
#include "src/core/NEON/kernels/NETileKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NETile::configure(const ITensor *input, ITensor *output, const Multiples &multiples)
{
- auto k = arm_compute::support::cpp14::make_unique<NETileKernel>();
+ auto k = std::make_unique<NETileKernel>();
k->configure(input, output, multiples);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NETranspose.cpp b/src/runtime/NEON/functions/NETranspose.cpp
index 5af417f4ed..aaa52e36b9 100644
--- a/src/runtime/NEON/functions/NETranspose.cpp
+++ b/src/runtime/NEON/functions/NETranspose.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NETranspose.h"
#include "src/core/NEON/kernels/NETransposeKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -32,7 +31,7 @@ namespace arm_compute
{
void NETranspose::configure(const ITensor *input, ITensor *output)
{
- auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
+ auto k = std::make_unique<NETransposeKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/NEUpsampleLayer.cpp b/src/runtime/NEON/functions/NEUpsampleLayer.cpp
index aae58387e2..1a08494c63 100644
--- a/src/runtime/NEON/functions/NEUpsampleLayer.cpp
+++ b/src/runtime/NEON/functions/NEUpsampleLayer.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/NEON/functions/NEUpsampleLayer.h"
#include "src/core/NEON/kernels/NEUpsampleLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -44,7 +43,7 @@ Status NEUpsampleLayer::validate(const ITensorInfo *input, const ITensorInfo *ou
void NEUpsampleLayer::configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy &policy)
{
_data_layout = input->info()->data_layout();
- _kernel = arm_compute::support::cpp14::make_unique<NEUpsampleLayerKernel>();
+ _kernel = std::make_unique<NEUpsampleLayerKernel>();
_kernel->configure(input, output, info, policy);
}
diff --git a/src/runtime/NEON/functions/NEWarpAffine.cpp b/src/runtime/NEON/functions/NEWarpAffine.cpp
index b5dbfe0d5c..1e8907b895 100644
--- a/src/runtime/NEON/functions/NEWarpAffine.cpp
+++ b/src/runtime/NEON/functions/NEWarpAffine.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEWarpKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -42,14 +41,14 @@ void NEWarpAffine::configure(ITensor *input, ITensor *output, const std::array<f
{
case InterpolationPolicy::NEAREST_NEIGHBOR:
{
- auto k = arm_compute::support::cpp14::make_unique<NEWarpAffineKernel<InterpolationPolicy::NEAREST_NEIGHBOR>>();
+ auto k = std::make_unique<NEWarpAffineKernel<InterpolationPolicy::NEAREST_NEIGHBOR>>();
k->configure(input, output, matrix, border_mode, constant_border_value);
_kernel = std::move(k);
break;
}
case InterpolationPolicy::BILINEAR:
{
- auto k = arm_compute::support::cpp14::make_unique<NEWarpAffineKernel<InterpolationPolicy::BILINEAR>>();
+ auto k = std::make_unique<NEWarpAffineKernel<InterpolationPolicy::BILINEAR>>();
k->configure(input, output, matrix, border_mode, constant_border_value);
_kernel = std::move(k);
break;
@@ -59,7 +58,7 @@ void NEWarpAffine::configure(ITensor *input, ITensor *output, const std::array<f
ARM_COMPUTE_ERROR("Interpolation type not supported");
}
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, constant_border_value);
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEWarpPerspective.cpp b/src/runtime/NEON/functions/NEWarpPerspective.cpp
index 8d42121005..d546da89b8 100644
--- a/src/runtime/NEON/functions/NEWarpPerspective.cpp
+++ b/src/runtime/NEON/functions/NEWarpPerspective.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/core/Validate.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEWarpKernel.h"
-#include "support/MemorySupport.h"
#include <utility>
@@ -42,14 +41,14 @@ void NEWarpPerspective::configure(ITensor *input, ITensor *output, const std::ar
{
case InterpolationPolicy::NEAREST_NEIGHBOR:
{
- auto k = arm_compute::support::cpp14::make_unique<NEWarpPerspectiveKernel<InterpolationPolicy::NEAREST_NEIGHBOR>>();
+ auto k = std::make_unique<NEWarpPerspectiveKernel<InterpolationPolicy::NEAREST_NEIGHBOR>>();
k->configure(input, output, matrix, border_mode, constant_border_value);
_kernel = std::move(k);
break;
}
case InterpolationPolicy::BILINEAR:
{
- auto k = arm_compute::support::cpp14::make_unique<NEWarpPerspectiveKernel<InterpolationPolicy::BILINEAR>>();
+ auto k = std::make_unique<NEWarpPerspectiveKernel<InterpolationPolicy::BILINEAR>>();
k->configure(input, output, matrix, border_mode, constant_border_value);
_kernel = std::move(k);
break;
@@ -59,7 +58,7 @@ void NEWarpPerspective::configure(ITensor *input, ITensor *output, const std::ar
ARM_COMPUTE_ERROR("Interpolation type not supported");
}
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(input, _kernel->border_size(), border_mode, constant_border_value);
_border_handler = std::move(b);
}
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 1cb2458e13..265df9246f 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -35,7 +35,6 @@
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h"
-#include "support/MemorySupport.h"
#include "src/core/NEON/kernels/convolution/common/utils.hpp"
#include "src/core/NEON/kernels/convolution/winograd/winograd.hpp"
@@ -351,18 +350,18 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
if(input->info()->dimension(width_idx) > 4 && input->info()->dimension(height_idx) > 4)
{
using config = NEWinogradLayerConfiguration<float, float, 4, 4, 3, 3>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else
{
using config = NEWinogradLayerConfiguration<float, float, 2, 2, 3, 3>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
@@ -370,63 +369,63 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
else if(kernel_size == Size2D(5, 5))
{
using config = NEWinogradLayerConfiguration<float, float, 2, 2, 5, 5>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(1, 3))
{
using config = NEWinogradLayerConfiguration<float, float, 6, 1, 3, 1>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(3, 1))
{
using config = NEWinogradLayerConfiguration<float, float, 1, 6, 1, 3>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(1, 5))
{
using config = NEWinogradLayerConfiguration<float, float, 4, 1, 5, 1>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(5, 1))
{
using config = NEWinogradLayerConfiguration<float, float, 1, 4, 1, 5>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(1, 7))
{
using config = NEWinogradLayerConfiguration<float, float, 2, 1, 7, 1>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
else if(kernel_size == Size2D(7, 1))
{
using config = NEWinogradLayerConfiguration<float, float, 1, 2, 1, 7>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
@@ -441,9 +440,9 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
if(kernel_size == Size2D(3, 3))
{
using config = NEWinogradLayerConfiguration<__fp16, __fp16, 4, 4, 3, 3>;
- transform_input_kernel = support::cpp14::make_unique<config::TransformInputKernel>();
- transform_weights_kernel = support::cpp14::make_unique<config::TransformWeightsKernel>();
- transform_output_kernel = support::cpp14::make_unique<config::TransformOutputKernel>();
+ transform_input_kernel = std::make_unique<config::TransformInputKernel>();
+ transform_weights_kernel = std::make_unique<config::TransformWeightsKernel>();
+ transform_output_kernel = std::make_unique<config::TransformOutputKernel>();
n_gemms = config::WinogradBase::N_GEMMS;
N_BLOCK = config::WinogradConv::N_BLOCK;
}
diff --git a/src/runtime/NEON/functions/NEYOLOLayer.cpp b/src/runtime/NEON/functions/NEYOLOLayer.cpp
index 5cad53bffd..515b177060 100644
--- a/src/runtime/NEON/functions/NEYOLOLayer.cpp
+++ b/src/runtime/NEON/functions/NEYOLOLayer.cpp
@@ -24,13 +24,12 @@
#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h"
#include "src/core/NEON/kernels/NEYOLOLayerKernel.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
void NEYOLOLayer::configure(ITensor *input, ITensor *output, const ActivationLayerInfo &act_info, int32_t num_classes)
{
- auto k = arm_compute::support::cpp14::make_unique<NEYOLOLayerKernel>();
+ auto k = std::make_unique<NEYOLOLayerKernel>();
k->configure(input, output, act_info, num_classes);
_kernel = std::move(k);
}
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index 11e89cb23b..101df98b7d 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -37,8 +37,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "support/MemorySupport.h"
-
#include <set>
namespace arm_compute
@@ -59,10 +57,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_qasymm8_convolver(int kern
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
+ return std::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
+ return std::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -73,10 +71,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_qasymm8_convolver(int kern
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
+ return std::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
+ return std::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -101,10 +99,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_qsymm8_perchannel_convolve
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
+ return std::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
+ return std::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -115,10 +113,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_qsymm8_perchannel_convolve
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
+ return std::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
+ return std::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -142,10 +140,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp16_convolver(int kernel_
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -156,10 +154,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp16_convolver(int kernel_
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 1, 1, float16_t, float16_t, float16_t>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 1, 1, float16_t, float16_t, float16_t>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float16_t, float16_t, float16_t>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float16_t, float16_t, float16_t>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -183,10 +181,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp32_convolver(int kernel_
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -197,10 +195,10 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp32_convolver(int kernel_
switch(stride_x)
{
case 1:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 5, 5, 1, 1, float, float, float>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 5, 5, 1, 1, float, float, float>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
case 2:
- return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float, float, float>>(
+ return std::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float, float, float>>(
n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
default:
return nullptr;
@@ -339,7 +337,7 @@ struct NEDepthwiseConvolutionAssemblyDispatch::LocalImpl
#ifndef DOXYGEN_SKIP_THIS
NEDepthwiseConvolutionAssemblyDispatch::NEDepthwiseConvolutionAssemblyDispatch(std::shared_ptr<arm_compute::IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr), _packed_weights(), _workspace(), _is_prepared(false),
- _pImpl(support::cpp14::make_unique<LocalImpl>())
+ _pImpl(std::make_unique<LocalImpl>())
{
}
#endif /* DOXYGEN_SKIP_THIS */
diff --git a/src/runtime/OffsetLifetimeManager.cpp b/src/runtime/OffsetLifetimeManager.cpp
index 3bd8b02cf1..a47fa184fa 100644
--- a/src/runtime/OffsetLifetimeManager.cpp
+++ b/src/runtime/OffsetLifetimeManager.cpp
@@ -27,7 +27,6 @@
#include "arm_compute/runtime/IAllocator.h"
#include "arm_compute/runtime/IMemoryGroup.h"
#include "arm_compute/runtime/OffsetMemoryPool.h"
-#include "support/MemorySupport.h"
#include <algorithm>
#include <cmath>
@@ -57,7 +56,7 @@ const OffsetLifetimeManager::info_type &OffsetLifetimeManager::info() const
std::unique_ptr<IMemoryPool> OffsetLifetimeManager::create_pool(IAllocator *allocator)
{
ARM_COMPUTE_ERROR_ON(allocator == nullptr);
- return support::cpp14::make_unique<OffsetMemoryPool>(allocator, _blob);
+ return std::make_unique<OffsetMemoryPool>(allocator, _blob);
}
MappingType OffsetLifetimeManager::mapping_type() const
diff --git a/src/runtime/OffsetMemoryPool.cpp b/src/runtime/OffsetMemoryPool.cpp
index 677c55c7c4..ffedf5586c 100644
--- a/src/runtime/OffsetMemoryPool.cpp
+++ b/src/runtime/OffsetMemoryPool.cpp
@@ -30,7 +30,6 @@
#include "arm_compute/runtime/IMemoryPool.h"
#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/runtime/Types.h"
-#include "support/MemorySupport.h"
namespace arm_compute
{
@@ -75,6 +74,6 @@ MappingType OffsetMemoryPool::mapping_type() const
std::unique_ptr<IMemoryPool> OffsetMemoryPool::duplicate()
{
ARM_COMPUTE_ERROR_ON(!_allocator);
- return support::cpp14::make_unique<OffsetMemoryPool>(_allocator, _blob_info);
+ return std::make_unique<OffsetMemoryPool>(_allocator, _blob_info);
}
} // namespace arm_compute
diff --git a/src/runtime/PoolManager.cpp b/src/runtime/PoolManager.cpp
index 19ed2577dc..87376a71a4 100644
--- a/src/runtime/PoolManager.cpp
+++ b/src/runtime/PoolManager.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/IMemoryPool.h"
-#include "support/MemorySupport.h"
#include <algorithm>
#include <list>
@@ -71,7 +70,7 @@ void PoolManager::register_pool(std::unique_ptr<IMemoryPool> pool)
_free_pools.push_front(std::move(pool));
// Update semaphore
- _sem = arm_compute::support::cpp14::make_unique<arm_compute::Semaphore>(_free_pools.size());
+ _sem = std::make_unique<arm_compute::Semaphore>(_free_pools.size());
}
std::unique_ptr<IMemoryPool> PoolManager::release_pool()
@@ -86,7 +85,7 @@ std::unique_ptr<IMemoryPool> PoolManager::release_pool()
_free_pools.pop_front();
// Update semaphore
- _sem = arm_compute::support::cpp14::make_unique<arm_compute::Semaphore>(_free_pools.size());
+ _sem = std::make_unique<arm_compute::Semaphore>(_free_pools.size());
return pool;
}
diff --git a/src/runtime/Scheduler.cpp b/src/runtime/Scheduler.cpp
index 5b3010b173..0713b9a2ad 100644
--- a/src/runtime/Scheduler.cpp
+++ b/src/runtime/Scheduler.cpp
@@ -24,7 +24,6 @@
#include "arm_compute/runtime/Scheduler.h"
#include "arm_compute/core/Error.h"
-#include "support/MemorySupport.h"
#if ARM_COMPUTE_CPP_SCHEDULER
#include "arm_compute/runtime/CPP/CPPScheduler.h"
@@ -55,12 +54,12 @@ namespace
std::map<Scheduler::Type, std::unique_ptr<IScheduler>> init()
{
std::map<Scheduler::Type, std::unique_ptr<IScheduler>> m;
- m[Scheduler::Type::ST] = support::cpp14::make_unique<SingleThreadScheduler>();
+ m[Scheduler::Type::ST] = std::make_unique<SingleThreadScheduler>();
#if defined(ARM_COMPUTE_CPP_SCHEDULER)
- m[Scheduler::Type::CPP] = support::cpp14::make_unique<CPPScheduler>();
+ m[Scheduler::Type::CPP] = std::make_unique<CPPScheduler>();
#endif // defined(ARM_COMPUTE_CPP_SCHEDULER)
#if defined(ARM_COMPUTE_OPENMP_SCHEDULER)
- m[Scheduler::Type::OMP] = support::cpp14::make_unique<OMPScheduler>();
+ m[Scheduler::Type::OMP] = std::make_unique<OMPScheduler>();
#endif // defined(ARM_COMPUTE_OPENMP_SCHEDULER)
return m;
diff --git a/src/runtime/SchedulerFactory.cpp b/src/runtime/SchedulerFactory.cpp
index e395c2e029..cc21d62630 100644
--- a/src/runtime/SchedulerFactory.cpp
+++ b/src/runtime/SchedulerFactory.cpp
@@ -23,8 +23,6 @@
*/
#include "arm_compute/runtime/SchedulerFactory.h"
-#include "support/MemorySupport.h"
-
#include "arm_compute/core/Error.h"
#if ARM_COMPUTE_CPP_SCHEDULER
#include "arm_compute/runtime/CPP/CPPScheduler.h"
@@ -54,12 +52,12 @@ std::unique_ptr<IScheduler> SchedulerFactory::create(Type type)
{
case Type::ST:
{
- return support::cpp14::make_unique<SingleThreadScheduler>();
+ return std::make_unique<SingleThreadScheduler>();
}
case Type::CPP:
{
#if ARM_COMPUTE_CPP_SCHEDULER
- return support::cpp14::make_unique<CPPScheduler>();
+ return std::make_unique<CPPScheduler>();
#else /* ARM_COMPUTE_CPP_SCHEDULER */
ARM_COMPUTE_ERROR("Recompile with cppthreads=1 to use C++11 scheduler.");
#endif /* ARM_COMPUTE_CPP_SCHEDULER */
@@ -67,7 +65,7 @@ std::unique_ptr<IScheduler> SchedulerFactory::create(Type type)
case Type::OMP:
{
#if ARM_COMPUTE_OPENMP_SCHEDULER
- return support::cpp14::make_unique<OMPScheduler>();
+ return std::make_unique<OMPScheduler>();
#else /* ARM_COMPUTE_OPENMP_SCHEDULER */
ARM_COMPUTE_ERROR("Recompile with openmp=1 to use openmp scheduler.");
#endif /* ARM_COMPUTE_OPENMP_SCHEDULER */
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index e8c5c49018..4ae27c59fc 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -28,7 +28,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryRegion.h"
-#include "support/MemorySupport.h"
#include <cstddef>
@@ -136,7 +135,7 @@ void TensorAllocator::allocate()
const size_t alignment_to_use = (alignment() != 0) ? alignment() : 64;
if(_associated_memory_group == nullptr)
{
- _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(info().total_size(), alignment_to_use));
+ _memory.set_owned_region(std::make_unique<MemoryRegion>(info().total_size(), alignment_to_use));
}
else
{
@@ -157,7 +156,7 @@ Status TensorAllocator::import_memory(void *memory)
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(alignment() != 0 && !arm_compute::utility::check_aligned(memory, alignment()));
- _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(memory, info().total_size()));
+ _memory.set_owned_region(std::make_unique<MemoryRegion>(memory, info().total_size()));
info().set_is_resizable(false);
return Status{};
diff --git a/support/MemorySupport.h b/support/MemorySupport.h
deleted file mode 100644
index a904f34f97..0000000000
--- a/support/MemorySupport.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_SUPPORT_MEMORYSUPPORT
-#define ARM_COMPUTE_SUPPORT_MEMORYSUPPORT
-
-#include <memory>
-
-namespace arm_compute
-{
-namespace support
-{
-namespace cpp11
-{
-// std::align is missing in GCC 4.9
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57350
-inline void *align(std::size_t alignment, std::size_t size, void *&ptr, std::size_t &space)
-{
- std::uintptr_t pn = reinterpret_cast<std::uintptr_t>(ptr);
- std::uintptr_t aligned = (pn + alignment - 1) & -alignment;
- std::size_t padding = aligned - pn;
- if(space < size + padding)
- {
- return nullptr;
- }
-
- space -= padding;
-
- return ptr = reinterpret_cast<void *>(aligned);
-}
-} //namespace cpp11
-namespace cpp14
-{
-/** make_unique is missing in CPP11. Re-implement it according to the standard proposal. */
-
-/**<Template for single object */
-template <class T>
-struct _Unique_if
-{
- typedef std::unique_ptr<T> _Single_object; /**< Single object type */
-};
-
-/** Template for array */
-template <class T>
-struct _Unique_if<T[]>
-{
- typedef std::unique_ptr<T[]> _Unknown_bound; /**< Array type */
-};
-
-/** Template for array with known bounds (to throw an error).
- *
- * @note this is intended to never be hit.
- */
-template <class T, size_t N>
-struct _Unique_if<T[N]>
-{
- typedef void _Known_bound; /**< Should never be used */
-};
-
-/** Construct a single object and return a unique pointer to it.
- *
- * @param[in] args Constructor arguments.
- *
- * @return a unique pointer to the new object.
- */
-template <class T, class... Args>
-typename _Unique_if<T>::_Single_object
-make_unique(Args &&... args)
-{
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-/** Construct an array of objects and return a unique pointer to it.
- *
- * @param[in] n Array size
- *
- * @return a unique pointer to the new array.
- */
-template <class T>
-typename _Unique_if<T>::_Unknown_bound
-make_unique(size_t n)
-{
- typedef typename std::remove_extent<T>::type U;
- return std::unique_ptr<T>(new U[n]());
-}
-
-/** It is invalid to attempt to make_unique an array with known bounds. */
-template <class T, class... Args>
-typename _Unique_if<T>::_Known_bound
-make_unique(Args &&...) = delete;
-} // namespace cpp14
-} // namespace support
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_SUPPORT_MEMORYSUPPORT */
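[Editor's note, not part of the patch: with the baseline now at C++14, the helpers deleted above map one-to-one onto the standard library, which is what every hunk in this commit relies on. A minimal, illustrative sketch of the standard equivalents follows; the Kernel type and buffer sizes are stand-ins invented for the example, not library code.]

    #include <cstddef>
    #include <memory>

    // Hypothetical stand-in for any kernel/object the library constructs.
    struct Kernel
    {
        int id;
        explicit Kernel(int i) : id(i) {}
    };

    int main()
    {
        // support::cpp14::make_unique<T>(args...)  ->  std::make_unique<T>(args...)
        auto k = std::make_unique<Kernel>(42);

        // support::cpp14::make_unique<T[]>(n)      ->  std::make_unique<T[]>(n)
        auto buffer = std::make_unique<unsigned char[]>(1024);

        // support::cpp11::align(...)               ->  std::align(...)
        // std::align returns the adjusted pointer, or nullptr if size bytes
        // at the requested alignment do not fit in the remaining space.
        std::size_t space = 1024;
        void       *ptr   = buffer.get();
        void       *p     = std::align(64, 256, ptr, space);

        return (k && p) ? 0 : 1;
    }

[The library changes below simply swap the namespace at each call site; no behavioural change is intended.]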
diff --git a/tests/CL/Helper.h b/tests/CL/Helper.h
index e548af4938..d217af6e18 100644
--- a/tests/CL/Helper.h
+++ b/tests/CL/Helper.h
@@ -33,7 +33,7 @@
#include "src/core/CL/ICLKernel.h"
-#include "support/MemorySupport.h"
+#include <memory>
namespace arm_compute
{
@@ -51,7 +51,7 @@ public:
template <typename... Args>
void configure(Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->configure(std::forward<Args>(args)...);
_kernel = std::move(k);
}
@@ -63,7 +63,7 @@ public:
template <typename... Args>
void configure(GPUTarget gpu_target, Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->set_target(gpu_target);
k->configure(std::forward<Args>(args)...);
_kernel = std::move(k);
@@ -92,7 +92,7 @@ public:
template <typename T, typename... Args>
void configure(T first, Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->configure(first, std::forward<Args>(args)...);
_kernel = std::move(k);
_border_handler->configure(first, BorderSize(bordersize), BorderMode::CONSTANT, PixelValue());
@@ -113,7 +113,7 @@ public:
template <typename T, typename... Args>
void configure(T first, T second, Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->set_target(CLScheduler::get().target());
k->configure(first, second, std::forward<Args>(args)...);
_kernel = std::move(k);
diff --git a/tests/NEON/Helper.h b/tests/NEON/Helper.h
index ea47a416b1..714152ebcd 100644
--- a/tests/NEON/Helper.h
+++ b/tests/NEON/Helper.h
@@ -28,11 +28,11 @@
#include "arm_compute/runtime/NEON/INESimpleFunction.h"
#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "support/MemorySupport.h"
#include "tests/Globals.h"
#include <algorithm>
#include <array>
+#include <memory>
#include <vector>
namespace arm_compute
@@ -64,7 +64,7 @@ public:
template <typename... Args>
void configure(Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->configure(std::forward<Args>(args)...);
_kernel = std::move(k);
}
@@ -92,11 +92,11 @@ public:
template <typename T, typename... Args>
void configure(T first, Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->configure(first, std::forward<Args>(args)...);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(first, BorderSize(bordersize), BorderMode::CONSTANT, PixelValue());
_border_handler = std::move(b);
}
@@ -115,11 +115,11 @@ public:
template <typename T, typename... Args>
void configure(T first, Args &&... args)
{
- auto k = arm_compute::support::cpp14::make_unique<K>();
+ auto k = std::make_unique<K>();
k->configure(first, std::forward<Args>(args)...);
_kernel = std::move(k);
- auto b = arm_compute::support::cpp14::make_unique<NEFillBorderKernel>();
+ auto b = std::make_unique<NEFillBorderKernel>();
b->configure(first, BorderSize(_kernel->border_size()), BorderMode::CONSTANT, PixelValue());
_border_handler = std::move(b);
}
diff --git a/tests/RawTensor.cpp b/tests/RawTensor.cpp
index a32886e425..8d610a4969 100644
--- a/tests/RawTensor.cpp
+++ b/tests/RawTensor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,20 +30,20 @@ namespace test
RawTensor::RawTensor(TensorShape shape, Format format)
: SimpleTensor(shape, format)
{
- _buffer = support::cpp14::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
+ _buffer = std::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
}
RawTensor::RawTensor(TensorShape shape, DataType data_type, int num_channels)
: SimpleTensor(shape, data_type, num_channels)
{
- _buffer = support::cpp14::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
+ _buffer = std::make_unique<uint8_t[]>(SimpleTensor::num_elements() * SimpleTensor::num_channels() * SimpleTensor::element_size());
}
RawTensor::RawTensor(const RawTensor &tensor)
: SimpleTensor(tensor.shape(), tensor.data_type(), tensor.num_channels())
{
_format = tensor.format();
- _buffer = support::cpp14::make_unique<uint8_t[]>(num_elements() * num_channels() * element_size());
+ _buffer = std::make_unique<uint8_t[]>(num_elements() * num_channels() * element_size());
std::copy_n(tensor.data(), num_elements() * num_channels() * element_size(), _buffer.get());
}
diff --git a/tests/SimpleTensor.h b/tests/SimpleTensor.h
index 82a53521ac..c1bd7f87b5 100644
--- a/tests/SimpleTensor.h
+++ b/tests/SimpleTensor.h
@@ -27,7 +27,6 @@
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
-#include "support/MemorySupport.h"
#include "tests/IAccessor.h"
#include "tests/Utils.h"
@@ -268,7 +267,7 @@ SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format)
_data_layout(DataLayout::NCHW)
{
_num_channels = num_channels();
- _buffer = support::cpp14::make_unique<T[]>(num_elements() * _num_channels);
+ _buffer = std::make_unique<T[]>(num_elements() * _num_channels);
}
template <typename T>
@@ -280,7 +279,7 @@ SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_cha
_quantization_info(quantization_info),
_data_layout(data_layout)
{
- _buffer = support::cpp14::make_unique<T[]>(this->_shape.total_size() * _num_channels);
+ _buffer = std::make_unique<T[]>(this->_shape.total_size() * _num_channels);
}
template <typename T>
@@ -293,7 +292,7 @@ SimpleTensor<T>::SimpleTensor(const SimpleTensor &tensor)
_quantization_info(tensor.quantization_info()),
_data_layout(tensor.data_layout())
{
- _buffer = support::cpp14::make_unique<T[]>(tensor.num_elements() * _num_channels);
+ _buffer = std::make_unique<T[]>(tensor.num_elements() * _num_channels);
std::copy_n(tensor.data(), this->_shape.total_size() * _num_channels, _buffer.get());
}
diff --git a/tests/framework/Framework.cpp b/tests/framework/Framework.cpp
index 8e836ee41f..a1c684c08a 100644
--- a/tests/framework/Framework.cpp
+++ b/tests/framework/Framework.cpp
@@ -24,7 +24,6 @@
#include "Framework.h"
#include "arm_compute/runtime/Scheduler.h"
-#include "support/MemorySupport.h"
#include "tests/framework/ParametersLibrary.h"
#include "tests/framework/TestFilter.h"
@@ -36,6 +35,7 @@
#include <chrono>
#include <iostream>
+#include <memory>
#include <sstream>
#include <type_traits>
@@ -94,7 +94,7 @@ Framework::Framework()
Instrument::make_instrument<OpenCLMemoryUsage, ScaleFactor::SCALE_1M>);
#endif /* ARM_COMPUTE_CL */
- instruments_info = support::cpp14::make_unique<InstrumentsInfo>();
+ instruments_info = std::make_unique<InstrumentsInfo>();
}
std::set<InstrumentsDescription> Framework::available_instruments() const
diff --git a/tests/framework/Framework.h b/tests/framework/Framework.h
index 01ab37347e..cf854f2351 100644
--- a/tests/framework/Framework.h
+++ b/tests/framework/Framework.h
@@ -355,7 +355,7 @@ private:
template <typename T>
inline void Framework::add_test_case(std::string test_name, DatasetMode mode, TestCaseFactory::Status status)
{
- _test_factories.emplace_back(support::cpp14::make_unique<SimpleTestCaseFactory<T>>(current_suite_name(), std::move(test_name), mode, status));
+ _test_factories.emplace_back(std::make_unique<SimpleTestCaseFactory<T>>(current_suite_name(), std::move(test_name), mode, status));
}
template <typename T, typename D>
diff --git a/tests/framework/TestCaseFactory.h b/tests/framework/TestCaseFactory.h
index 97ba230743..a41226af24 100644
--- a/tests/framework/TestCaseFactory.h
+++ b/tests/framework/TestCaseFactory.h
@@ -26,7 +26,6 @@
#include "DatasetModes.h"
#include "TestCase.h"
-#include "support/MemorySupport.h"
#include <memory>
#include <string>
@@ -183,7 +182,7 @@ inline ::std::ostream &operator<<(::std::ostream &stream, TestCaseFactory::Statu
template <typename T>
inline std::unique_ptr<TestCase> SimpleTestCaseFactory<T>::make() const
{
- return support::cpp14::make_unique<T>();
+ return std::make_unique<T>();
}
template <typename T, typename D>
@@ -195,7 +194,7 @@ inline DataTestCaseFactory<T, D>::DataTestCaseFactory(std::string suite_name, st
template <typename T, typename D>
inline std::unique_ptr<TestCase> DataTestCaseFactory<T, D>::make() const
{
- return support::cpp14::make_unique<T>(_data);
+ return std::make_unique<T>(_data);
}
} // namespace framework
} // namespace test
diff --git a/tests/framework/command_line/CommonOptions.cpp b/tests/framework/command_line/CommonOptions.cpp
index b4bf58bfdc..6fb37470c1 100644
--- a/tests/framework/command_line/CommonOptions.cpp
+++ b/tests/framework/command_line/CommonOptions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,7 +101,7 @@ std::vector<std::unique_ptr<Printer>> CommonOptions::create_printers()
if(pretty_console->value() && (log_file->is_set() || log_format->value() != LogFormat::PRETTY))
{
- auto pretty_printer = support::cpp14::make_unique<PrettyPrinter>();
+ auto pretty_printer = std::make_unique<PrettyPrinter>();
pretty_printer->set_color_output(color_output->value());
printers.push_back(std::move(pretty_printer));
}
@@ -110,13 +110,13 @@ std::vector<std::unique_ptr<Printer>> CommonOptions::create_printers()
switch(log_format->value())
{
case LogFormat::JSON:
- printer = support::cpp14::make_unique<JSONPrinter>();
+ printer = std::make_unique<JSONPrinter>();
break;
case LogFormat::NONE:
break;
case LogFormat::PRETTY:
default:
- auto pretty_printer = support::cpp14::make_unique<PrettyPrinter>();
+ auto pretty_printer = std::make_unique<PrettyPrinter>();
// Don't use colours if we print to a file:
pretty_printer->set_color_output((!log_file->is_set()) && color_output->value());
printer = std::move(pretty_printer);
@@ -139,14 +139,14 @@ std::vector<std::unique_ptr<Printer>> CommonOptions::create_printers()
if(json_file->is_set())
{
- printers.push_back(support::cpp14::make_unique<JSONPrinter>());
+ printers.push_back(std::make_unique<JSONPrinter>());
log_streams.push_back(std::make_shared<std::ofstream>(json_file->value()));
printers.back()->set_stream(*log_streams.back().get());
}
if(pretty_file->is_set())
{
- printers.push_back(support::cpp14::make_unique<PrettyPrinter>());
+ printers.push_back(std::make_unique<PrettyPrinter>());
log_streams.push_back(std::make_shared<std::ofstream>(pretty_file->value()));
printers.back()->set_stream(*log_streams.back().get());
}
diff --git a/tests/framework/instruments/Instrument.h b/tests/framework/instruments/Instrument.h
index 4506460515..3ea15825ad 100644
--- a/tests/framework/instruments/Instrument.h
+++ b/tests/framework/instruments/Instrument.h
@@ -24,8 +24,6 @@
#ifndef ARM_COMPUTE_TEST_INSTRUMENT
#define ARM_COMPUTE_TEST_INSTRUMENT
-#include "support/MemorySupport.h"
-
#include "../Utils.h"
#include "Measurement.h"
@@ -135,7 +133,7 @@ protected:
template <typename T, ScaleFactor scale>
inline std::unique_ptr<Instrument> Instrument::make_instrument()
{
- return support::cpp14::make_unique<T>(scale);
+ return std::make_unique<T>(scale);
}
} // namespace framework
diff --git a/tests/framework/instruments/SchedulerTimer.cpp b/tests/framework/instruments/SchedulerTimer.cpp
index b4d1c597e7..c81b807c3e 100644
--- a/tests/framework/instruments/SchedulerTimer.cpp
+++ b/tests/framework/instruments/SchedulerTimer.cpp
@@ -188,7 +188,7 @@ void SchedulerClock<output_timestamps>::test_start()
{
if(user != nullptr && user->scheduler() != nullptr)
{
- user->intercept_scheduler(support::cpp14::make_unique<Interceptor<output_timestamps>>(_kernels, *user->scheduler(), _scale_factor));
+ user->intercept_scheduler(std::make_unique<Interceptor<output_timestamps>>(_kernels, *user->scheduler(), _scale_factor));
}
});
}
diff --git a/tests/main.cpp b/tests/main.cpp
index f0d5df7d84..46a081b6c8 100644
--- a/tests/main.cpp
+++ b/tests/main.cpp
@@ -21,7 +21,6 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "support/MemorySupport.h"
#include "support/StringSupport.h"
#include "tests/AssetsLibrary.h"
#include "tests/framework/DatasetModes.h"
@@ -166,20 +165,20 @@ int main(int argc, char **argv)
Scheduler::get().set_num_threads(threads->value());
// Create CPU context
- auto cpu_ctx = support::cpp14::make_unique<RuntimeContext>();
+ auto cpu_ctx = std::make_unique<RuntimeContext>();
cpu_ctx->set_scheduler(&Scheduler::get());
// Track CPU context
- auto cpu_ctx_track = support::cpp14::make_unique<ContextSchedulerUser>(cpu_ctx.get());
+ auto cpu_ctx_track = std::make_unique<ContextSchedulerUser>(cpu_ctx.get());
// Create parameters
- parameters = support::cpp14::make_unique<ParametersLibrary>();
+ parameters = std::make_unique<ParametersLibrary>();
parameters->set_cpu_ctx(std::move(cpu_ctx));
#ifdef ARM_COMPUTE_GC
// Setup OpenGL context
{
- auto gles_ctx = support::cpp14::make_unique<GCRuntimeContext>();
+ auto gles_ctx = std::make_unique<GCRuntimeContext>();
ARM_COMPUTE_ERROR_ON(gles_ctx == nullptr);
{
// Legacy singletons API: This has been deprecated and the singletons will be removed
@@ -312,8 +311,8 @@ int main(int argc, char **argv)
return 0;
}
- library = support::cpp14::make_unique<AssetsLibrary>(assets->value(), seed->value());
- fixed_library = support::cpp14::make_unique<AssetsLibrary>(assets->value(), fixed_seed);
+ library = std::make_unique<AssetsLibrary>(assets->value(), seed->value());
+ fixed_library = std::make_unique<AssetsLibrary>(assets->value(), fixed_seed);
if(!parser.validate())
{
diff --git a/tests/validate_examples/RunExample.cpp b/tests/validate_examples/RunExample.cpp
index aca4ddcc7c..736d4816f5 100644
--- a/tests/validate_examples/RunExample.cpp
+++ b/tests/validate_examples/RunExample.cpp
@@ -27,8 +27,8 @@
#include "utils/Utils.cpp"
#include "ValidateExample.h"
-#include "arm_compute/runtime/Scheduler.h"
#include "arm_compute/runtime/CL/CLHelpers.h"
+#include "arm_compute/runtime/Scheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/framework/Framework.h"
@@ -139,8 +139,8 @@ int run_example(int argc, char **argv, std::unique_ptr<ValidateExample> example)
g_example_argv.emplace_back(const_cast<char *>(arg.c_str())); // NOLINT
}
- library = support::cpp14::make_unique<AssetsLibrary>("." /* Only using random values */, seed->value());
- fixed_library = support::cpp14::make_unique<AssetsLibrary>(".", fixed_seed);
+ library = std::make_unique<AssetsLibrary>("." /* Only using random values */, seed->value());
+ fixed_library = std::make_unique<AssetsLibrary>(".", fixed_seed);
if(options.log_level->value() > framework::LogLevel::NONE)
{
diff --git a/tests/validate_examples/graph_validate_utils.h b/tests/validate_examples/graph_validate_utils.h
index 36134a4cea..f6f47cc2c3 100644
--- a/tests/validate_examples/graph_validate_utils.h
+++ b/tests/validate_examples/graph_validate_utils.h
@@ -337,11 +337,11 @@ inline std::unique_ptr<graph::ITensorAccessor> get_accessor(const TensorParams &
{
if(!tensor.npy.empty())
{
- return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::NumPyBinLoader>(tensor.npy);
+ return std::make_unique<arm_compute::graph_utils::NumPyBinLoader>(tensor.npy);
}
else
{
- return arm_compute::support::cpp14::make_unique<arm_compute::graph_utils::RandomAccessor>(lower, upper, seed);
+ return std::make_unique<arm_compute::graph_utils::RandomAccessor>(lower, upper, seed);
}
}
@@ -607,17 +607,17 @@ inline std::unique_ptr<graph::ITensorAccessor> get_verify_accessor(ExampleParams
{
case DataType::QASYMM8:
{
- return arm_compute::support::cpp14::make_unique<VerifyAccessorT<uint8_t>>(
+ return std::make_unique<VerifyAccessorT<uint8_t>>(
params);
}
case DataType::F16:
{
- return arm_compute::support::cpp14::make_unique<VerifyAccessorT<half>>(
+ return std::make_unique<VerifyAccessorT<half>>(
params);
}
case DataType::F32:
{
- return arm_compute::support::cpp14::make_unique<VerifyAccessorT<float>>(
+ return std::make_unique<VerifyAccessorT<float>>(
params);
}
default:
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index 9db98fb534..3ccdd99fe3 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -135,10 +135,10 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
const size_t total_size_in_bytes = tensor.info()->total_size();
const size_t alignment = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
size_t space = total_size_in_bytes + alignment;
- auto raw_data = support::cpp14::make_unique<uint8_t[]>(space);
+ auto raw_data = std::make_unique<uint8_t[]>(space);
void *aligned_ptr = raw_data.get();
- support::cpp11::align(alignment, total_size_in_bytes, aligned_ptr, space);
+ std::align(alignment, total_size_in_bytes, aligned_ptr, space);
cl::Buffer wrapped_buffer(import_malloc_memory_helper(aligned_ptr, total_size_in_bytes));
ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(wrapped_buffer)), framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index 273d2e0a4f..ef19524d1c 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -29,8 +29,6 @@
#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
-#include "support/MemorySupport.h"
-
#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
@@ -58,7 +56,7 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
// Allocate memory buffer
const size_t total_size = info.total_size();
- auto data = support::cpp14::make_unique<uint8_t[]>(total_size);
+ auto data = std::make_unique<uint8_t[]>(total_size);
// Negative case : Import nullptr
Tensor t1;
@@ -111,10 +109,10 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
const size_t total_size_in_bytes = tensor.info()->total_size();
size_t space = total_size_in_bytes + required_alignment;
- auto raw_data = support::cpp14::make_unique<uint8_t[]>(space);
+ auto raw_data = std::make_unique<uint8_t[]>(space);
void *aligned_ptr = raw_data.get();
- support::cpp11::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
+ std::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -160,7 +158,7 @@ TEST_CASE(ImportMemoryMallocPadded, framework::DatasetMode::ALL)
// Allocate and import tensor
const size_t total_size_in_bytes = tensor.info()->total_size();
- auto raw_data = support::cpp14::make_unique<uint8_t[]>(total_size_in_bytes);
+ auto raw_data = std::make_unique<uint8_t[]>(total_size_in_bytes);
ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(raw_data.get())), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
diff --git a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
index 74e62fb77f..4ac19bf3ba 100644
--- a/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
+++ b/tests/validation/fixtures/UNIT/DynamicTensorFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -264,7 +264,7 @@ public:
_info = info;
// Create function
- _f_target = support::cpp14::make_unique<ComplexFunctionType>(_ms.mm);
+ _f_target = std::make_unique<ComplexFunctionType>(_ms.mm);
}
void run_iteration(unsigned int idx)
@@ -425,7 +425,7 @@ protected:
for(unsigned int i = 0; i < num_functions; ++i)
{
- _functions.emplace_back(support::cpp14::make_unique<ComplexFunctionType>(_ms.mm));
+ _functions.emplace_back(std::make_unique<ComplexFunctionType>(_ms.mm));
}
for(unsigned int i = 0; i < num_resizes; ++i)
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 9ab9e54ce0..acd924da28 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -459,7 +459,7 @@ private:
*/
inline std::unique_ptr<graph::ITensorAccessor> get_random_accessor(PixelValue lower, PixelValue upper, const std::random_device::result_type seed = 0)
{
- return arm_compute::support::cpp14::make_unique<RandomAccessor>(lower, upper, seed);
+ return std::make_unique<RandomAccessor>(lower, upper, seed);
}
/** Generates appropriate weights accessor according to the specified path
@@ -478,11 +478,11 @@ inline std::unique_ptr<graph::ITensorAccessor> get_weights_accessor(const std::s
{
if(path.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>();
+ return std::make_unique<DummyAccessor>();
}
else
{
- return arm_compute::support::cpp14::make_unique<NumPyBinLoader>(path + data_file, file_layout);
+ return std::make_unique<NumPyBinLoader>(path + data_file, file_layout);
}
}
@@ -500,12 +500,12 @@ inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const arm_comp
{
if(!graph_parameters.validation_file.empty())
{
- return arm_compute::support::cpp14::make_unique<ValidationInputAccessor>(graph_parameters.validation_file,
- graph_parameters.validation_path,
- std::move(preprocessor),
- bgr,
- graph_parameters.validation_range_start,
- graph_parameters.validation_range_end);
+ return std::make_unique<ValidationInputAccessor>(graph_parameters.validation_file,
+ graph_parameters.validation_path,
+ std::move(preprocessor),
+ bgr,
+ graph_parameters.validation_range_start,
+ graph_parameters.validation_range_end);
}
else
{
@@ -513,17 +513,17 @@ inline std::unique_ptr<graph::ITensorAccessor> get_input_accessor(const arm_comp
const std::string &image_file_lower = lower_string(image_file);
if(arm_compute::utility::endswith(image_file_lower, ".npy"))
{
- return arm_compute::support::cpp14::make_unique<NumPyBinLoader>(image_file, graph_parameters.data_layout);
+ return std::make_unique<NumPyBinLoader>(image_file, graph_parameters.data_layout);
}
else if(arm_compute::utility::endswith(image_file_lower, ".jpeg")
|| arm_compute::utility::endswith(image_file_lower, ".jpg")
|| arm_compute::utility::endswith(image_file_lower, ".ppm"))
{
- return arm_compute::support::cpp14::make_unique<ImageAccessor>(image_file, bgr, std::move(preprocessor));
+ return std::make_unique<ImageAccessor>(image_file, bgr, std::move(preprocessor));
}
else
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>();
+ return std::make_unique<DummyAccessor>();
}
}
}
@@ -548,18 +548,18 @@ inline std::unique_ptr<graph::ITensorAccessor> get_output_accessor(const arm_com
ARM_COMPUTE_UNUSED(is_validation);
if(!graph_parameters.validation_file.empty())
{
- return arm_compute::support::cpp14::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file,
- output_stream,
- graph_parameters.validation_range_start,
- graph_parameters.validation_range_end);
+ return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file,
+ output_stream,
+ graph_parameters.validation_range_start,
+ graph_parameters.validation_range_end);
}
else if(graph_parameters.labels.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>(0);
+ return std::make_unique<DummyAccessor>(0);
}
else
{
- return arm_compute::support::cpp14::make_unique<TopNPredictionsAccessor>(graph_parameters.labels, top_n, output_stream);
+ return std::make_unique<TopNPredictionsAccessor>(graph_parameters.labels, top_n, output_stream);
}
}
/** Generates appropriate output accessor according to the specified graph parameters
@@ -582,18 +582,18 @@ inline std::unique_ptr<graph::ITensorAccessor> get_detection_output_accessor(con
ARM_COMPUTE_UNUSED(is_validation);
if(!graph_parameters.validation_file.empty())
{
- return arm_compute::support::cpp14::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file,
- output_stream,
- graph_parameters.validation_range_start,
- graph_parameters.validation_range_end);
+ return std::make_unique<ValidationOutputAccessor>(graph_parameters.validation_file,
+ output_stream,
+ graph_parameters.validation_range_start,
+ graph_parameters.validation_range_end);
}
else if(graph_parameters.labels.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>(0);
+ return std::make_unique<DummyAccessor>(0);
}
else
{
- return arm_compute::support::cpp14::make_unique<DetectionOutputAccessor>(graph_parameters.labels, tensor_shapes, output_stream);
+ return std::make_unique<DetectionOutputAccessor>(graph_parameters.labels, tensor_shapes, output_stream);
}
}
/** Generates appropriate npy output accessor according to the specified npy_path
@@ -613,11 +613,11 @@ inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std
{
if(npy_path.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>(0);
+ return std::make_unique<DummyAccessor>(0);
}
else
{
- return arm_compute::support::cpp14::make_unique<NumPyAccessor>(npy_path, shape, data_type, data_layout, output_stream);
+ return std::make_unique<NumPyAccessor>(npy_path, shape, data_type, data_layout, output_stream);
}
}
@@ -634,11 +634,11 @@ inline std::unique_ptr<graph::ITensorAccessor> get_save_npy_output_accessor(cons
{
if(npy_name.empty())
{
- return arm_compute::support::cpp14::make_unique<DummyAccessor>(0);
+ return std::make_unique<DummyAccessor>(0);
}
else
{
- return arm_compute::support::cpp14::make_unique<SaveNumPyAccessor>(npy_name, is_fortran);
+ return std::make_unique<SaveNumPyAccessor>(npy_name, is_fortran);
}
}
@@ -650,7 +650,7 @@ inline std::unique_ptr<graph::ITensorAccessor> get_save_npy_output_accessor(cons
*/
inline std::unique_ptr<graph::ITensorAccessor> get_print_output_accessor(std::ostream &output_stream = std::cout)
{
- return arm_compute::support::cpp14::make_unique<PrintAccessor>(output_stream);
+ return std::make_unique<PrintAccessor>(output_stream);
}
/** Permutes a given tensor shape given the input and output data layout
diff --git a/utils/ImageLoader.h b/utils/ImageLoader.h
index 2dbb6f9421..5abcb7a60f 100644
--- a/utils/ImageLoader.h
+++ b/utils/ImageLoader.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -392,7 +392,7 @@ public:
ARM_COMPUTE_ERROR_ON_MSG_VAR(max_val >= 256, "2 bytes per colour channel not supported in file %s",
filename.c_str());
- _feeder = support::cpp14::make_unique<FileImageFeeder>(_fs);
+ _feeder = std::make_unique<FileImageFeeder>(_fs);
}
catch(std::runtime_error &e)
{
@@ -467,7 +467,7 @@ public:
_height = height;
_data = std::unique_ptr<uint8_t, malloc_deleter>(rgb_image);
_is_loaded = true;
- _feeder = support::cpp14::make_unique<MemoryImageFeeder>(_data.get());
+ _feeder = std::make_unique<MemoryImageFeeder>(_data.get());
}
}
void close() override
@@ -512,9 +512,9 @@ public:
switch(type)
{
case ImageType::PPM:
- return support::cpp14::make_unique<PPMLoader>();
+ return std::make_unique<PPMLoader>();
case ImageType::JPEG:
- return support::cpp14::make_unique<JPEGLoader>();
+ return std::make_unique<JPEGLoader>();
case ImageType::UNKNOWN:
default:
return nullptr;
diff --git a/utils/Utils.h b/utils/Utils.h
index e44d978b24..b10d18aca2 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -38,7 +38,6 @@
#pragma GCC diagnostic ignored "-Wstrict-overflow"
#include "libnpy/npy.hpp"
#pragma GCC diagnostic pop
-#include "support/MemorySupport.h"
#include "support/StringSupport.h"
#ifdef ARM_COMPUTE_CL
@@ -54,6 +53,7 @@
#include <cstring>
#include <fstream>
#include <iostream>
+#include <memory>
#include <random>
#include <string>
#include <tuple>
@@ -110,7 +110,7 @@ int run_example(int argc, char **argv, std::unique_ptr<Example> example);
template <typename T>
int run_example(int argc, char **argv)
{
- return run_example(argc, argv, support::cpp14::make_unique<T>());
+ return run_example(argc, argv, std::make_unique<T>());
}
/** Draw a RGB rectangular window for the detected object
diff --git a/utils/command_line/CommandLineParser.h b/utils/command_line/CommandLineParser.h
index 5881723da8..e8fabc4251 100644
--- a/utils/command_line/CommandLineParser.h
+++ b/utils/command_line/CommandLineParser.h
@@ -26,11 +26,11 @@
#include "Option.h"
#include "arm_compute/core/utils/misc/Utility.h"
-#include "support/MemorySupport.h"
#include <iostream>
#include <map>
#include <memory>
+#include <memory>
#include <regex>
#include <string>
#include <utility>
@@ -102,14 +102,14 @@ private:
template <typename T, typename... As>
inline T *CommandLineParser::add_option(const std::string &name, As &&... args)
{
- auto result = _options.emplace(name, support::cpp14::make_unique<T>(name, std::forward<As>(args)...));
+ auto result = _options.emplace(name, std::make_unique<T>(name, std::forward<As>(args)...));
return static_cast<T *>(result.first->second.get());
}
template <typename T, typename... As>
inline T *CommandLineParser::add_positional_option(As &&... args)
{
- _positional_options.emplace_back(support::cpp14::make_unique<T>(std::forward<As>(args)...));
+ _positional_options.emplace_back(std::make_unique<T>(std::forward<As>(args)...));
return static_cast<T *>(_positional_options.back().get());
}