Diffstat (limited to 'src/graph/backends/NEON')
-rw-r--r--  src/graph/backends/NEON/NEDeviceBackend.cpp     38
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp  125
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp     64
-rw-r--r--  src/graph/backends/NEON/NESubTensorHandle.cpp    7
-rw-r--r--  src/graph/backends/NEON/NETensorHandle.cpp      10
5 files changed, 146 insertions, 98 deletions
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index 9efa3ac0c8..fc7b309803 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,18 +23,17 @@
*/
#include "arm_compute/graph/backends/NEON/NEDeviceBackend.h"
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/GraphContext.h"
-#include "arm_compute/graph/INode.h"
-#include "arm_compute/graph/Logger.h"
-#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/graph/backends/BackendRegistrar.h"
#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
#include "arm_compute/graph/backends/NEON/NENodeValidator.h"
#include "arm_compute/graph/backends/NEON/NESubTensorHandle.h"
#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
-
-#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/Logger.h"
+#include "arm_compute/graph/Tensor.h"
#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
@@ -44,8 +43,6 @@
#include "arm_compute/runtime/PoolManager.h"
#include "arm_compute/runtime/Scheduler.h"
-#include "support/ToolchainSupport.h"
-
namespace arm_compute
{
namespace graph
@@ -55,8 +52,7 @@ namespace backends
/** Register CPU backend */
static detail::BackendRegistrar<NEDeviceBackend> NEDeviceBackend_registrar(Target::NEON);
-NEDeviceBackend::NEDeviceBackend()
- : _allocator()
+NEDeviceBackend::NEDeviceBackend() : _allocator()
{
}
@@ -74,13 +70,13 @@ void NEDeviceBackend::release_backend_context(GraphContext &ctx)
void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
{
// Set number of threads
- if(ctx.config().num_threads >= 0)
+ if (ctx.config().num_threads >= 0)
{
Scheduler::get().set_num_threads(ctx.config().num_threads);
}
// Create function level memory manager
- if(ctx.memory_management_ctx(Target::NEON) == nullptr)
+ if (ctx.memory_management_ctx(Target::NEON) == nullptr)
{
MemoryManagerContext mm_ctx;
mm_ctx.target = Target::NEON;
@@ -93,7 +89,7 @@ void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
}
// Create function level weights manager
- if(ctx.weights_management_ctx(Target::NEON) == nullptr)
+ if (ctx.weights_management_ctx(Target::NEON) == nullptr)
{
WeightsManagerContext wm_ctx;
wm_ctx.target = Target::NEON;
@@ -126,9 +122,10 @@ std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tens
return std::make_unique<NETensorHandle>(info);
}
-std::unique_ptr<ITensorHandle> NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
+std::unique_ptr<ITensorHandle>
+NEDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent)
{
- if(parent == nullptr)
+ if (parent == nullptr)
{
return nullptr;
}
@@ -156,7 +153,7 @@ arm_compute::Status NEDeviceBackend::validate_node(INode &node)
std::shared_ptr<arm_compute::IMemoryManager> NEDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
{
std::shared_ptr<ILifetimeManager> lifetime_mgr = nullptr;
- if(affinity == MemoryManagerAffinity::Buffer)
+ if (affinity == MemoryManagerAffinity::Buffer)
{
lifetime_mgr = std::make_shared<BlobLifetimeManager>();
}
@@ -175,6 +172,11 @@ std::shared_ptr<arm_compute::IWeightsManager> NEDeviceBackend::create_weights_ma
auto weights_mgr = std::make_shared<IWeightsManager>();
return weights_mgr;
}
+
+void NEDeviceBackend::sync()
+{
+ // nop
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index 0fc5291648..fe15d4cec1 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,17 +23,15 @@
*/
#include "arm_compute/graph/backends/NEON/NEFunctionFactory.h"
+#include "arm_compute/graph/backends/FunctionHelpers.h"
+#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Logger.h"
-#include "arm_compute/graph/TypePrinter.h"
-#include "arm_compute/graph/backends/FunctionHelpers.h"
-#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"
+#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/runtime/CPP/CPPFunctions.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "support/Cast.h"
-#include "support/ToolchainSupport.h"
using namespace arm_compute::utils::cast;
@@ -90,7 +88,8 @@ struct NEFusedLayerTypes
namespace detail
{
template <>
-std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETargetInfo>(NormalizationLayerNode &node, GraphContext &ctx)
+std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETargetInfo>(NormalizationLayerNode &node,
+ GraphContext &ctx)
{
validate_node<NETargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
@@ -107,99 +106,127 @@ std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETa
// Log info
ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
- << node.name()
- << " Type: " << node.type()
- << " Target: " << NETargetInfo::TargetType
- << " Data Type: " << input->info()->data_type()
- << " Input shape: " << input->info()->tensor_shape()
- << " Output shape: " << output->info()->tensor_shape()
- << " Normalization info: " << norm_info.type()
- << std::endl);
+ << node.name() << " Type: " << node.type() << " Target: " << NETargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type() << " Input shape: "
+ << input->info()->tensor_shape() << " Output shape: " << output->info()->tensor_shape()
+ << " Normalization info: " << norm_info.type() << std::endl);
- return std::move(func);
+ return func;
}
} // namespace detail
std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &ctx)
{
- if(node == nullptr)
+ if (node == nullptr)
{
return nullptr;
}
NodeType type = node->type();
- switch(type)
+ switch (type)
{
case NodeType::ActivationLayer:
- return detail::create_activation_layer<NEActivationLayer, NETargetInfo>(*polymorphic_downcast<ActivationLayerNode *>(node));
+ return detail::create_activation_layer<NEActivationLayer, NETargetInfo>(
+ *polymorphic_downcast<ActivationLayerNode *>(node));
case NodeType::ArgMinMaxLayer:
- return detail::create_arg_min_max_layer<NEArgMinMaxLayer, NETargetInfo>(*polymorphic_downcast<ArgMinMaxLayerNode *>(node));
+ return detail::create_arg_min_max_layer<NEArgMinMaxLayer, NETargetInfo>(
+ *polymorphic_downcast<ArgMinMaxLayerNode *>(node));
case NodeType::BatchNormalizationLayer:
- return detail::create_batch_normalization_layer<NEBatchNormalizationLayer, NETargetInfo>(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
+ return detail::create_batch_normalization_layer<NEBatchNormalizationLayer, NETargetInfo>(
+ *polymorphic_downcast<BatchNormalizationLayerNode *>(node));
case NodeType::ChannelShuffleLayer:
- return detail::create_channel_shuffle_layer<NEChannelShuffleLayer, NETargetInfo>(*polymorphic_downcast<ChannelShuffleLayerNode *>(node));
+ return detail::create_channel_shuffle_layer<NEChannelShuffleLayer, NETargetInfo>(
+ *polymorphic_downcast<ChannelShuffleLayerNode *>(node));
case NodeType::ConvolutionLayer:
- return detail::create_convolution_layer<NEConvolutionLayerFunctions, NETargetInfo>(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
+ return detail::create_convolution_layer<NEConvolutionLayerFunctions, NETargetInfo>(
+ *polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
case NodeType::DepthToSpaceLayer:
- return detail::create_depth_to_space_layer<NEDepthToSpaceLayer, NETargetInfo>(*polymorphic_downcast<DepthToSpaceLayerNode *>(node));
+ return detail::create_depth_to_space_layer<NEDepthToSpaceLayer, NETargetInfo>(
+ *polymorphic_downcast<DepthToSpaceLayerNode *>(node));
case NodeType::DeconvolutionLayer:
- return detail::create_deconvolution_layer<NEDeconvolutionLayer, NETargetInfo>(*polymorphic_downcast<DeconvolutionLayerNode *>(node), ctx);
+ return detail::create_deconvolution_layer<NEDeconvolutionLayer, NETargetInfo>(
+ *polymorphic_downcast<DeconvolutionLayerNode *>(node), ctx);
case NodeType::ConcatenateLayer:
- return detail::create_concatenate_layer<NEConcatenateLayer, NETargetInfo>(*polymorphic_downcast<ConcatenateLayerNode *>(node));
+ return detail::create_concatenate_layer<NEConcatenateLayer, NETargetInfo>(
+ *polymorphic_downcast<ConcatenateLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
- return detail::create_depthwise_convolution_layer<NEDepthwiseConvolutionLayer, NETargetInfo>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ return detail::create_depthwise_convolution_layer<NEDepthwiseConvolutionLayer, NETargetInfo>(
+ *polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DequantizationLayer:
- return detail::create_dequantization_layer<NEDequantizationLayer, NETargetInfo>(*polymorphic_downcast<DequantizationLayerNode *>(node));
+ return detail::create_dequantization_layer<NEDequantizationLayer, NETargetInfo>(
+ *polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
- return detail::create_detection_output_layer<CPPDetectionOutputLayer, NETargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ return detail::create_detection_output_layer<CPPDetectionOutputLayer, NETargetInfo>(
+ *polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
- return detail::create_detection_post_process_layer<NEDetectionPostProcessLayer, NETargetInfo>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
+ return detail::create_detection_post_process_layer<NEDetectionPostProcessLayer, NETargetInfo>(
+ *polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::EltwiseLayer:
- return detail::create_eltwise_layer<NEEltwiseFunctions, NETargetInfo>(*polymorphic_downcast<EltwiseLayerNode *>(node));
+ return detail::create_eltwise_layer<NEEltwiseFunctions, NETargetInfo>(
+ *polymorphic_downcast<EltwiseLayerNode *>(node));
case NodeType::UnaryEltwiseLayer:
- return detail::create_unary_eltwise_layer<NEUnaryEltwiseFunctions, NETargetInfo>(*polymorphic_downcast<UnaryEltwiseLayerNode *>(node));
+ return detail::create_unary_eltwise_layer<NEUnaryEltwiseFunctions, NETargetInfo>(
+ *polymorphic_downcast<UnaryEltwiseLayerNode *>(node));
case NodeType::FlattenLayer:
- return detail::create_flatten_layer<NEFlattenLayer, NETargetInfo>(*polymorphic_downcast<FlattenLayerNode *>(node));
+ return detail::create_flatten_layer<NEFlattenLayer, NETargetInfo>(
+ *polymorphic_downcast<FlattenLayerNode *>(node));
case NodeType::FullyConnectedLayer:
- return detail::create_fully_connected_layer<NEFullyConnectedLayer, NETargetInfo>(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
+ return detail::create_fully_connected_layer<NEFullyConnectedLayer, NETargetInfo>(
+ *polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
case NodeType::FusedConvolutionBatchNormalizationLayer:
- return detail::create_fused_convolution_batch_normalization_layer<NEFusedLayerTypes, NETargetInfo>(*polymorphic_downcast<FusedConvolutionBatchNormalizationNode *>(node), ctx);
+ return detail::create_fused_convolution_batch_normalization_layer<NEFusedLayerTypes, NETargetInfo>(
+ *polymorphic_downcast<FusedConvolutionBatchNormalizationNode *>(node), ctx);
case NodeType::FusedDepthwiseConvolutionBatchNormalizationLayer:
- return detail::create_fused_depthwise_convolution_batch_normalization_layer<NEFusedLayerTypes, NETargetInfo>(*polymorphic_downcast<FusedDepthwiseConvolutionBatchNormalizationNode *>(node), ctx);
+ return detail::create_fused_depthwise_convolution_batch_normalization_layer<NEFusedLayerTypes,
+ NETargetInfo>(
+ *polymorphic_downcast<FusedDepthwiseConvolutionBatchNormalizationNode *>(node), ctx);
case NodeType::L2NormalizeLayer:
- return detail::create_l2_normalize_layer<NEL2NormalizeLayer, NETargetInfo>(*polymorphic_downcast<L2NormalizeLayerNode *>(node), ctx);
+ return detail::create_l2_normalize_layer<NEL2NormalizeLayer, NETargetInfo>(
+ *polymorphic_downcast<L2NormalizeLayerNode *>(node), ctx);
case NodeType::NormalizationLayer:
- return detail::create_normalization_layer<NENormalizationLayer, NETargetInfo>(*polymorphic_downcast<NormalizationLayerNode *>(node), ctx);
+ return detail::create_normalization_layer<NENormalizationLayer, NETargetInfo>(
+ *polymorphic_downcast<NormalizationLayerNode *>(node), ctx);
case NodeType::PadLayer:
return detail::create_pad_layer<NEPadLayer, NETargetInfo>(*polymorphic_downcast<PadLayerNode *>(node));
case NodeType::PermuteLayer:
- return detail::create_permute_layer<NEPermute, NETargetInfo>(*polymorphic_downcast<PermuteLayerNode *>(node));
+ return detail::create_permute_layer<NEPermute, NETargetInfo>(
+ *polymorphic_downcast<PermuteLayerNode *>(node));
case NodeType::PoolingLayer:
- return detail::create_pooling_layer<NEPoolingLayer, NETargetInfo>(*polymorphic_downcast<PoolingLayerNode *>(node));
+ return detail::create_pooling_layer<NEPoolingLayer, NETargetInfo>(
+ *polymorphic_downcast<PoolingLayerNode *>(node));
case NodeType::PReluLayer:
- return detail::create_prelu_layer<NEPReluLayer, NETargetInfo>(*polymorphic_downcast<PReluLayerNode *>(node));
+ return detail::create_prelu_layer<NEPReluLayer, NETargetInfo>(
+ *polymorphic_downcast<PReluLayerNode *>(node));
case NodeType::PrintLayer:
return detail::create_print_layer<NETargetInfo>(*polymorphic_downcast<PrintLayerNode *>(node));
case NodeType::PriorBoxLayer:
- return detail::create_priorbox_layer<NEPriorBoxLayer, NETargetInfo>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
+ return detail::create_priorbox_layer<NEPriorBoxLayer, NETargetInfo>(
+ *polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::QuantizationLayer:
- return detail::create_quantization_layer<NEQuantizationLayer, NETargetInfo>(*polymorphic_downcast<QuantizationLayerNode *>(node));
+ return detail::create_quantization_layer<NEQuantizationLayer, NETargetInfo>(
+ *polymorphic_downcast<QuantizationLayerNode *>(node));
case NodeType::ReductionOperationLayer:
- return detail::create_reduction_operation_layer<NEReductionOperation, NETargetInfo>(*polymorphic_downcast<ReductionLayerNode *>(node), ctx);
+ return detail::create_reduction_operation_layer<NEReductionOperation, NETargetInfo>(
+ *polymorphic_downcast<ReductionLayerNode *>(node), ctx);
case NodeType::ReorgLayer:
- return detail::create_reorg_layer<NEReorgLayer, NETargetInfo>(*polymorphic_downcast<ReorgLayerNode *>(node));
+ return detail::create_reorg_layer<NEReorgLayer, NETargetInfo>(
+ *polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ReshapeLayer:
- return detail::create_reshape_layer<NEReshapeLayer, NETargetInfo>(*polymorphic_downcast<ReshapeLayerNode *>(node));
+ return detail::create_reshape_layer<NEReshapeLayer, NETargetInfo>(
+ *polymorphic_downcast<ReshapeLayerNode *>(node));
case NodeType::ResizeLayer:
return detail::create_resize_layer<NEScale, NETargetInfo>(*polymorphic_downcast<ResizeLayerNode *>(node));
case NodeType::SliceLayer:
return detail::create_slice_layer<NESlice, NETargetInfo>(*polymorphic_downcast<SliceLayerNode *>(node));
case NodeType::SoftmaxLayer:
- return detail::create_softmax_layer<NESoftmaxLayer, NETargetInfo>(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
+ return detail::create_softmax_layer<NESoftmaxLayer, NETargetInfo>(
+ *polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
case NodeType::StackLayer:
- return detail::create_stack_layer<NEStackLayer, NETargetInfo>(*polymorphic_downcast<StackLayerNode *>(node));
+ return detail::create_stack_layer<NEStackLayer, NETargetInfo>(
+ *polymorphic_downcast<StackLayerNode *>(node));
case NodeType::StridedSliceLayer:
- return detail::create_strided_slice_layer<NEStridedSlice, NETargetInfo>(*polymorphic_downcast<StridedSliceLayerNode *>(node));
+ return detail::create_strided_slice_layer<NEStridedSlice, NETargetInfo>(
+ *polymorphic_downcast<StridedSliceLayerNode *>(node));
default:
return nullptr;
}
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index a485e5d235..a97806f92c 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -25,9 +25,9 @@
#include "arm_compute/graph/backends/ValidateHelpers.h"
#include "arm_compute/graph/nodes/Nodes.h"
-
#include "arm_compute/runtime/CPP/CPPFunctions.h"
#include "arm_compute/runtime/NEON/NEFunctions.h"
+
#include "support/Cast.h"
using namespace arm_compute::utils::cast;
@@ -56,41 +56,51 @@ struct NEUnaryEltwiseLayerFunctions
Status NENodeValidator::validate(INode *node)
{
- if(node == nullptr)
+ if (node == nullptr)
{
return Status{};
}
NodeType type = node->type();
- switch(type)
+ switch (type)
{
case NodeType::ArgMinMaxLayer:
- return detail::validate_arg_min_max_layer<NEArgMinMaxLayer>(*polymorphic_downcast<ArgMinMaxLayerNode *>(node));
+ return detail::validate_arg_min_max_layer<NEArgMinMaxLayer>(
+ *polymorphic_downcast<ArgMinMaxLayerNode *>(node));
case NodeType::BoundingBoxTransformLayer:
- return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : BoundingBoxTransformLayer");
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Unsupported operation : BoundingBoxTransformLayer");
case NodeType::ChannelShuffleLayer:
- return detail::validate_channel_shuffle_layer<NEChannelShuffleLayer>(*polymorphic_downcast<ChannelShuffleLayerNode *>(node));
+ return detail::validate_channel_shuffle_layer<NEChannelShuffleLayer>(
+ *polymorphic_downcast<ChannelShuffleLayerNode *>(node));
case NodeType::ConvolutionLayer:
- return detail::validate_convolution_layer<NEConvolutionLayer,
- NEDirectConvolutionLayer,
- NEGEMMConvolutionLayer,
- NEWinogradConvolutionLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
+ return detail::validate_convolution_layer<NEConvolutionLayer, NEDirectConvolutionLayer,
+ NEGEMMConvolutionLayer, NEWinogradConvolutionLayer>(
+ *polymorphic_downcast<ConvolutionLayerNode *>(node));
case NodeType::DepthToSpaceLayer:
- return detail::validate_depth_to_space_layer<NEDepthToSpaceLayer>(*polymorphic_downcast<DepthToSpaceLayerNode *>(node));
+ return detail::validate_depth_to_space_layer<NEDepthToSpaceLayer>(
+ *polymorphic_downcast<DepthToSpaceLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
- return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer>(
+ *polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DequantizationLayer:
- return detail::validate_dequantization_layer<NEDequantizationLayer>(*polymorphic_downcast<DequantizationLayerNode *>(node));
+ return detail::validate_dequantization_layer<NEDequantizationLayer>(
+ *polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
- return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(
+ *polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
- return detail::validate_detection_post_process_layer<NEDetectionPostProcessLayer>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
+ return detail::validate_detection_post_process_layer<NEDetectionPostProcessLayer>(
+ *polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::GenerateProposalsLayer:
- return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : GenerateProposalsLayer");
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Unsupported operation : GenerateProposalsLayer");
case NodeType::L2NormalizeLayer:
- return detail::validate_l2_normalize_layer<NEL2NormalizeLayer>(*polymorphic_downcast<L2NormalizeLayerNode *>(node));
+ return detail::validate_l2_normalize_layer<NEL2NormalizeLayer>(
+ *polymorphic_downcast<L2NormalizeLayerNode *>(node));
case NodeType::NormalizePlanarYUVLayer:
- return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : NormalizePlanarYUVLayer");
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Unsupported operation : NormalizePlanarYUVLayer");
case NodeType::PadLayer:
return detail::validate_pad_layer<NEPadLayer>(*polymorphic_downcast<PadLayerNode *>(node));
case NodeType::PermuteLayer:
@@ -100,23 +110,29 @@ Status NENodeValidator::validate(INode *node)
case NodeType::PriorBoxLayer:
return detail::validate_priorbox_layer<NEPriorBoxLayer>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::QuantizationLayer:
- return detail::validate_quantization_layer<NEQuantizationLayer>(*polymorphic_downcast<QuantizationLayerNode *>(node));
+ return detail::validate_quantization_layer<NEQuantizationLayer>(
+ *polymorphic_downcast<QuantizationLayerNode *>(node));
case NodeType::ReductionOperationLayer:
- return detail::validate_reduction_operation_layer<NEReductionOperation>(*polymorphic_downcast<ReductionLayerNode *>(node));
+ return detail::validate_reduction_operation_layer<NEReductionOperation>(
+ *polymorphic_downcast<ReductionLayerNode *>(node));
case NodeType::ReorgLayer:
return detail::validate_reorg_layer<NEReorgLayer>(*polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ReshapeLayer:
return detail::validate_reshape_layer<NEReshapeLayer>(*polymorphic_downcast<ReshapeLayerNode *>(node));
case NodeType::ROIAlignLayer:
- return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : ROIAlignLayer");
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR,
+ "Unsupported operation : ROIAlignLayer");
case NodeType::SliceLayer:
return detail::validate_slice_layer<NESlice>(*polymorphic_downcast<SliceLayerNode *>(node));
case NodeType::StridedSliceLayer:
- return detail::validate_strided_slice_layer<NEStridedSlice>(*polymorphic_downcast<StridedSliceLayerNode *>(node));
+ return detail::validate_strided_slice_layer<NEStridedSlice>(
+ *polymorphic_downcast<StridedSliceLayerNode *>(node));
case NodeType::EltwiseLayer:
- return detail::validate_eltwise_Layer<NEEltwiseLayerFunctions>(*polymorphic_downcast<EltwiseLayerNode *>(node));
+ return detail::validate_eltwise_Layer<NEEltwiseLayerFunctions>(
+ *polymorphic_downcast<EltwiseLayerNode *>(node));
case NodeType::UnaryEltwiseLayer:
- return detail::validate_unary_eltwise_layer<NEUnaryEltwiseLayerFunctions>(*polymorphic_downcast<UnaryEltwiseLayerNode *>(node));
+ return detail::validate_unary_eltwise_layer<NEUnaryEltwiseLayerFunctions>(
+ *polymorphic_downcast<UnaryEltwiseLayerNode *>(node));
default:
return Status{};
}
diff --git a/src/graph/backends/NEON/NESubTensorHandle.cpp b/src/graph/backends/NEON/NESubTensorHandle.cpp
index 36f29d0d10..8964a00c5e 100644
--- a/src/graph/backends/NEON/NESubTensorHandle.cpp
+++ b/src/graph/backends/NEON/NESubTensorHandle.cpp
@@ -29,7 +29,10 @@ namespace graph
{
namespace backends
{
-NESubTensorHandle::NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent)
+NESubTensorHandle::NESubTensorHandle(ITensorHandle *parent_handle,
+ const TensorShape &shape,
+ const Coordinates &coords,
+ bool extend_parent)
: _sub_tensor(), _parent_handle(nullptr)
{
ARM_COMPUTE_ERROR_ON(!parent_handle);
@@ -95,4 +98,4 @@ Target NESubTensorHandle::target() const
}
} // namespace backends
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/backends/NEON/NETensorHandle.cpp b/src/graph/backends/NEON/NETensorHandle.cpp
index 4393156e8a..dabf67060d 100644
--- a/src/graph/backends/NEON/NETensorHandle.cpp
+++ b/src/graph/backends/NEON/NETensorHandle.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
#include "arm_compute/runtime/MemoryGroup.h"
+
#include "support/Cast.h"
namespace arm_compute
@@ -32,8 +33,7 @@ namespace graph
{
namespace backends
{
-NETensorHandle::NETensorHandle(const ITensorInfo &info)
- : _tensor()
+NETensorHandle::NETensorHandle(const ITensorInfo &info) : _tensor()
{
_tensor.allocator()->init(info);
}
@@ -50,7 +50,7 @@ void NETensorHandle::free()
void NETensorHandle::manage(IMemoryGroup *mg)
{
- if(mg != nullptr)
+ if (mg != nullptr)
{
mg->manage(&_tensor);
}
@@ -68,7 +68,7 @@ void NETensorHandle::unmap()
void NETensorHandle::release_if_unused()
{
// TODO (geopin01): Release tensor only if all sub-tensors are marked as not used
- if(!_tensor.is_used())
+ if (!_tensor.is_used())
{
_tensor.allocator()->free();
}
@@ -100,4 +100,4 @@ Target NETensorHandle::target() const
}
} // namespace backends
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute