diff options
author | ramelg01 <ramy.elgammal@arm.com> | 2021-10-02 14:34:36 +0100 |
---|---|---|
committer | ramy.elgammal <ramy.elgammal@arm.com> | 2021-10-04 17:51:37 +0000 |
commit | 4a6d9e85a9cb2e199d20b06e5450036c3b83b91d (patch) | |
tree | dcfa7986053d80c16ca3edb5ef0bd38fba501ba4 /arm_compute | |
parent | 6d891575e0c4432e170db7746037934299a0f2ca (diff) | |
download | ComputeLibrary-4a6d9e85a9cb2e199d20b06e5450036c3b83b91d.tar.gz |
Provide logging for configure functions in all CPP functions
- Moving impl of CPPSplit template to src/runtime/CPP to allow
including of Log.h from src/common.
- Fix logging of vector<ITensor*> to print the contained tensors' info, not their ptrs.
Partially-Resolves: COMPMID-4718
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: Idec81665b2a7c0cfae5248803109c6e2edc520a1
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6362
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/runtime/CPP/functions/CPPSplit.h | 135 |
1 file changed, 5 insertions, 130 deletions
diff --git a/arm_compute/runtime/CPP/functions/CPPSplit.h b/arm_compute/runtime/CPP/functions/CPPSplit.h index b2b4d07c86..b797b26960 100644 --- a/arm_compute/runtime/CPP/functions/CPPSplit.h +++ b/arm_compute/runtime/CPP/functions/CPPSplit.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020 Arm Limited. + * Copyright (c) 2020-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -41,10 +41,8 @@ template <typename SliceType, typename TensorInterfaceType = ITensor> class CPPSplit : public IFunction { public: - CPPSplit() - : _outputs_vector(), _slice_functions(), _num_outputs(0) - { - } + CPPSplit(); + /** Static function to check if given info will lead to a valid configuration of @ref CPPSplit * * @param[in] input The input tensor info. Data types supported: All. @@ -55,72 +53,7 @@ public: * * @return a status */ - static Status validate(const ITensorInfo *input, const std::vector<ITensorInfo *> &outputs, unsigned int axis) - { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ERROR_ON(axis >= input->num_dimensions()); - ARM_COMPUTE_RETURN_ERROR_ON(outputs.size() < 2); - - // Get output shape - TensorShape output_shape{}; - unsigned int total_output_shape_size = 0; - - // Sum the output sizes and fall back to evenly-sized splits if any are zero - const bool using_split_shapes = std::none_of(outputs.begin(), outputs.end(), [&total_output_shape_size](ITensorInfo * info) - { - unsigned int output_shape_size = info->tensor_shape().total_size(); - total_output_shape_size += output_shape_size; - return output_shape_size == 0; - }); - - if(using_split_shapes) - { - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != total_output_shape_size); - } - else - { - output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input, axis, outputs.size()); - ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() == 0); - } - - // Validate output tensors - unsigned int axis_offset = 0; - for(const auto &output : outputs) - { - 
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); - if(using_split_shapes) - { - output_shape = output->tensor_shape(); - ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() == 0); - } - - const size_t axis_split_step = output_shape[axis]; - - // Start/End coordinates - Coordinates start_coords; - Coordinates end_coords; - for(unsigned int d = 0; d < output_shape.num_dimensions(); ++d) - { - end_coords.set(d, -1); - } - - // Output auto inizialitation if not yet initialized - TensorInfo tmp_output_info = *output->clone(); - if(tmp_output_info.tensor_shape().total_size() == 0) - { - tmp_output_info = input->clone()->set_is_resizable(true).set_tensor_shape(output_shape); - } - - // Update coordinate on axis - start_coords.set(axis, axis_offset); - end_coords.set(axis, axis_offset + axis_split_step); - - ARM_COMPUTE_RETURN_ON_ERROR(SliceType::validate(input, output, start_coords, end_coords)); - axis_offset += axis_split_step; - } - - return Status{}; - } + static Status validate(const ITensorInfo *input, const std::vector<ITensorInfo *> &outputs, unsigned int axis); /** Initialise the kernel's input and outputs. * @@ -130,65 +63,7 @@ public: * from the split dimension. * @param[in] axis Axis on which to split the input. 
*/ - void configure(const TensorInterfaceType *input, const std::vector<TensorInterfaceType *> &outputs, unsigned int axis) - { - // Create Slice functions - _num_outputs = outputs.size(); - _slice_functions.resize(_num_outputs); - - // Extract output tensor info - std::vector<ITensorInfo *> outputs_info; - for(auto &output : outputs) - { - ARM_COMPUTE_ERROR_ON_NULLPTR(output); - outputs_info.emplace_back(output->info()); - } - - // If any of the outputs have a zero size, fall-back to using evenly-sized output splits - const bool outputs_have_sizes = std::none_of(outputs_info.begin(), outputs_info.end(), [](ITensorInfo * info) - { - return info->tensor_shape().total_size() == 0; - }); - - // Validate - ARM_COMPUTE_ERROR_THROW_ON(CPPSplit::validate(input->info(), outputs_info, axis)); - - unsigned int axis_offset = 0; - unsigned int i = 0; - - for(const auto &output_info : outputs_info) - { - // Get output shape - TensorShape output_shape = (outputs_have_sizes ? - output_info->tensor_shape() : - arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs)); - - const size_t axis_split_step = output_shape[axis]; - - // Start/End coordinates - Coordinates start_coords; - Coordinates end_coords; - - for(unsigned int d = 0; d < output_shape.num_dimensions(); ++d) - { - end_coords.set(d, -1); - } - - // Update coordinate on axis - start_coords.set(axis, axis_offset); - end_coords.set(axis, axis_offset + axis_split_step); - - // Configure slice function - _slice_functions[i].configure(input, outputs[i], start_coords, end_coords); - - // Set valid region from shape - outputs[i]->info()->set_valid_region(ValidRegion(Coordinates(), output_shape)); - - // Update axis offset - axis_offset += axis_split_step; - ++i; - } - } + void configure(const TensorInterfaceType *input, const std::vector<TensorInterfaceType *> &outputs, unsigned int axis); protected: std::vector<TensorInterfaceType *> _outputs_vector; |