From f932d2c8409831cb9cb97a2eb65be93ad4709cd6 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Mon, 6 Jul 2020 11:27:21 +0100
Subject: COMPMID-3386: Support memory injection in CLConcatenate functions/kernels

Signed-off-by: Georgios Pinitas
Change-Id: I611adf4f506d406540e920b0bd6befb4b5108918
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3601
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 src/runtime/CL/functions/CLConcatenateLayer.cpp | 152 +++++++++++++++++-------
 src/runtime/CL/functions/CLLSTMLayer.cpp        |  26 ++--
 2 files changed, 119 insertions(+), 59 deletions(-)

diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index a4e8665d10..06903d2ff2 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -40,6 +40,8 @@
 
 namespace arm_compute
 {
+namespace experimental
+{
 CLConcatenateLayer::CLConcatenateLayer()
     : _concat_kernels(),
       _num_inputs(0),
@@ -47,54 +49,23 @@ CLConcatenateLayer::CLConcatenateLayer()
 {
 }
 
-void CLConcatenateLayer::configure(std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
-{
-    configure(CLKernelLibrary::get().get_compile_context(), inputs_vector, output, axis);
-}
-
-void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
-{
-    configure_internal(compile_context, std::move(inputs_vector), output, axis);
-}
-
-void CLConcatenateLayer::configure(std::vector<const ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
-{
-    configure(CLKernelLibrary::get().get_compile_context(), inputs_vector, output, axis);
-}
-
-void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std::vector<const ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
-{
-    configure_internal(compile_context, std::move(inputs_vector), output, axis);
-}
-
-Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
-{
-    return validate_internal(inputs_vector, output, axis);
-}
-
-Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
-{
-    return validate_internal(inputs_vector, output, axis);
-}
-
-template <typename TensorType>
-void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_context, std::vector<TensorType *> &&inputs_vector, ICLTensor *output, size_t axis)
+void CLConcatenateLayer::configure(const CLCompileContext &compile_context, const std::vector<ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
 {
     ARM_COMPUTE_ERROR_ON(output == nullptr);
     _axis       = axis;
     _num_inputs = inputs_vector.size();
 
-    std::vector<ITensorInfo *> inputs_vector_info(inputs_vector.size());
-    std::transform(inputs_vector.begin(), inputs_vector.end(), inputs_vector_info.begin(), [](TensorType * t)
+    TensorShape                      output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+    std::vector<const ITensorInfo *> const_inputs_vector(inputs_vector.size());
+    std::transform(inputs_vector.begin(), inputs_vector.end(), const_inputs_vector.begin(), [](ITensorInfo * t)
     {
         ARM_COMPUTE_ERROR_ON_NULLPTR(t);
-        return t->info();
+        return t;
     });
-    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
 
     // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
-    ARM_COMPUTE_ERROR_THROW_ON(CLConcatenateLayer::validate(inputs_vector_info, output->info(), axis));
+    auto_init_if_empty(*output, output_shape, 1, inputs_vector[0]->data_type());
+    ARM_COMPUTE_ERROR_THROW_ON(CLConcatenateLayer::validate(const_inputs_vector, output, axis));
 
     unsigned int offset = 0;
     switch(_axis)
@@ -126,7 +97,7 @@ void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_cont
                     {
                         auto kernel = support::cpp14::make_unique<CLWidthConcatenateLayerKernel>();
                         kernel->configure(compile_context, inputs_vector.at(i), offset, output);
-                        offset += inputs_vector.at(i)->info()->dimension(_axis);
+                        offset += inputs_vector.at(i)->dimension(_axis);
                         _concat_kernels.emplace_back(std::move(kernel));
                     }
                     break;
@@ -140,7 +111,7 @@ void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_cont
             {
                 auto kernel = support::cpp14::make_unique<CLHeightConcatenateLayerKernel>();
                 kernel->configure(compile_context, inputs_vector.at(i), offset, output);
-                offset += inputs_vector.at(i)->info()->dimension(_axis);
+                offset += inputs_vector.at(i)->dimension(_axis);
                 _concat_kernels.emplace_back(std::move(kernel));
             }
             break;
@@ -151,7 +122,7 @@ void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_cont
            {
                auto kernel = support::cpp14::make_unique<CLDepthConcatenateLayerKernel>();
                kernel->configure(compile_context, inputs_vector.at(i), offset, output);
-               offset += inputs_vector.at(i)->info()->dimension(_axis);
+               offset += inputs_vector.at(i)->dimension(_axis);
                _concat_kernels.emplace_back(std::move(kernel));
            }
            break;
@@ -162,7 +133,7 @@ void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_cont
            {
                auto kernel = support::cpp14::make_unique<CLBatchConcatenateLayerKernel>();
                kernel->configure(compile_context, inputs_vector.at(i), offset, output);
-               offset += inputs_vector.at(i)->info()->dimension(_axis);
+               offset += inputs_vector.at(i)->dimension(_axis);
                _concat_kernels.emplace_back(std::move(kernel));
            }
            break;
@@ -172,8 +143,7 @@ void CLConcatenateLayer::configure_internal(const CLCompileContext &compile_cont
     }
 }
 
-template <typename TensorInfoType>
-Status CLConcatenateLayer::validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output, size_t axis)
+Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr);
     const unsigned int num_inputs = inputs_vector.size();
@@ -250,11 +220,101 @@ Status CLConcatenateLayer::validate_internal(const std::vector
     return Status{};
 }
 
+MemoryRequirements CLConcatenateLayer::workspace() const
+{
+    return MemoryRequirements{};
+}
+
+void CLConcatenateLayer::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace)
+{
+    ARM_COMPUTE_UNUSED(workspace);
+
+    if(inputs.empty() || outputs.empty())
+    {
+        ARM_COMPUTE_ERROR("No inputs provided");
+    }
+
+    if(inputs.size() != _num_inputs)
+    {
+        ARM_COMPUTE_ERROR("Configured with different number of inputs");
+    }
+
+    if(_axis == Window::DimX && (_num_inputs == 2 || _num_inputs == 4))
+    {
+        ARM_COMPUTE_ERROR_ON(_concat_kernels.empty());
+        CLScheduler::get().enqueue_op(*_concat_kernels.at(0), inputs, outputs, true);
+    }
+    else
+    {
+        int i = 0;
+        for(auto &k : _concat_kernels)
+        {
+            const InputTensorMap input = { { TensorType::ACL_SRC, inputs.at(ACL_SRC_VEC + i) } };
+            CLScheduler::get().enqueue_op(*k, input, outputs, true);
+            ++i;
+        }
+    }
+}
+} // namespace experimental
+
+struct CLConcatenateLayer::Impl
+{
+    std::vector<const ICLTensor *>                     srcs{};
+    ICLTensor                                         *dst{ nullptr };
+    unsigned int                                       num_inputs{ 0 };
+    unsigned int                                       axis{ 0 };
+    std::unique_ptr<experimental::CLConcatenateLayer>  op{ nullptr };
+};
+CLConcatenateLayer::CLConcatenateLayer()
+    : _impl(support::cpp14::make_unique<Impl>())
+{
+}
+
+CLConcatenateLayer::CLConcatenateLayer(CLConcatenateLayer &&) = default;
+
+CLConcatenateLayer &CLConcatenateLayer::operator=(CLConcatenateLayer &&) = default;
+
+CLConcatenateLayer::~CLConcatenateLayer() = default;
+
+void CLConcatenateLayer::configure(std::vector<const ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
+{
+    configure(CLKernelLibrary::get().get_compile_context(), inputs_vector, output, axis);
+}
+
+void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std::vector<const ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis)
+{
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+    _impl->srcs       = inputs_vector;
+    _impl->dst        = output;
+    _impl->axis       = axis;
+    _impl->num_inputs = inputs_vector.size();
+    _impl->op         = arm_compute::support::cpp14::make_unique<experimental::CLConcatenateLayer>();
+
+    std::vector<ITensorInfo *> inputs_vector_info;
+    for(unsigned int i = 0; i < inputs_vector.size(); ++i)
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
+        inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
+    }
+    _impl->op->configure(compile_context, inputs_vector_info, _impl->dst->info(), axis);
+}
+
+Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
+{
+    return experimental::CLConcatenateLayer::validate(inputs_vector, output, axis);
+}
+
 void CLConcatenateLayer::run()
 {
-    for(auto &kernel : _concat_kernels)
+    InputTensorMap srcs;
+    for(unsigned i = 0; i < _impl->num_inputs; ++i)
     {
-        CLScheduler::get().enqueue(*kernel, true);
+        srcs.insert(std::make_pair(TensorType::ACL_SRC_VEC + i, _impl->srcs.at(i)));
     }
+    const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } };
+
+    _impl->op->run(srcs, dst, {});
 }
 } // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index c07d7bc5c4..1b46baaf5e 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -110,7 +110,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
     _forget_gate_out2.allocator()->init(TensorInfo(concat_shape, 1, input->info()->data_type()));
 
     _memory_group.manage(&_forget_gate_out2);
-    _concat_inputs_forget_gate.configure(compile_context, input, output_state_in, &_forget_gate_out2);
+    _concat_inputs_forget_gate.configure(compile_context, inputs_vector, &_forget_gate_out2, Window::DimX);
 
     std::vector<const ICLTensor *> weights_vector;
 
@@ -119,7 +119,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
     const TensorShape weights_concat_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(weights_vector, 0);
     _forget_gate_out6.allocator()->init(TensorInfo(weights_concat_shape, 1, input->info()->data_type()));
 
-    _concat_weights_forget_gate.configure(compile_context, input_to_forget_weights, recurrent_to_forget_weights, &_forget_gate_out6);
+    _concat_weights_forget_gate.configure(compile_context, weights_vector, &_forget_gate_out6, Window::DimX);
 
     _memory_group.manage(&_forget_gate_out5);
     _fully_connected_forget_gate.configure(compile_context, &_forget_gate_out2, &_forget_gate_out6, (_is_layer_norm_lstm) ? nullptr : forget_gate_bias, &_forget_gate_out5);
@@ -188,7 +188,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
         TensorShape lstm_weights_concat_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(lstm_weights, 0);
         _input_gate_out2.allocator()->init(TensorInfo(lstm_weights_concat_shape, 1, input->info()->data_type()));
 
-        _concat_weights_input_gate.configure(compile_context, lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(), &_input_gate_out2);
+        _concat_weights_input_gate.configure(compile_context, lstm_weights, &_input_gate_out2, Window::DimX);
 
         _memory_group.manage(&_input_gate_out1);
 
@@ -290,7 +290,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
     TensorShape in_out_weights_concat_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(in_out_weights, 0);
     _output2.allocator()->init(TensorInfo(in_out_weights_concat_shape, 1, input->info()->data_type()));
 
-    _concat_weights_output.configure(compile_context, input_to_output_weights, recurrent_to_output_weights, &_output2);
+    _concat_weights_output.configure(compile_context, in_out_weights, &_output2, Window::DimX);
 
     _memory_group.manage(&_output1);
     _memory_group.manage(&_output4);
@@ -371,7 +371,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe
     _copy_output.configure(compile_context, output_state_out, output);
 
     // Vector for holding the tensors to store in scratch buffer
-    std::vector<ICLTensor *> scratch_inputs;
+    std::vector<const ICLTensor *> scratch_inputs;
     if(!lstm_params.has_cifg_opt())
     {
         scratch_inputs.emplace_back(input_gate_out);
@@ -485,7 +485,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input,
     const TensorShape concat_shape       = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, 0);
     TensorInfo        forget_gate_concat = TensorInfo(concat_shape, 1, input->data_type());
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(input, output_state_in, &forget_gate_concat));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate(inputs_vector, &forget_gate_concat, Window::DimX));
 
     if(lstm_params.has_peephole_opt())
     {
@@ -516,7 +516,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input,
         lstm_weights.emplace_back(lstm_params.recurrent_to_input_weights());
         TensorShape lstm_weights_concat_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(lstm_weights, 0);
         TensorInfo  lstm_gate_concat          = TensorInfo(lstm_weights_concat_shape, 1, input->data_type());
-        ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(), &lstm_gate_concat));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate(lstm_weights, &lstm_gate_concat, Window::DimX));
 
         ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, lstm_params.input_to_input_weights(), (lstm_params.use_layer_norm()) ? nullptr : lstm_params.input_gate_bias(), &input_gate));
@@ -567,7 +567,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input,
     in_out_weights.emplace_back(recurrent_to_output_weights);
     TensorShape in_out_weights_concat_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(in_out_weights, 0);
     TensorInfo  in_out_gate_concat          = TensorInfo(in_out_weights_concat_shape, 1, input->data_type());
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(input_to_output_weights, recurrent_to_output_weights, &in_out_gate_concat));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate(in_out_weights, &in_out_gate_concat, Window::DimX));
 
     // Validate output gate tmp
     ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_output_weights, (lstm_params.use_layer_norm()) ? nullptr : output_gate_bias, &output_gate_tmp));
@@ -604,7 +604,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input,
     ARM_COMPUTE_RETURN_ON_ERROR(CLCopyKernel::validate(output_state_out, output));
 
     // Validate scratch concatenation
-    std::vector<ITensorInfo *> inputs_vector_info_raw;
+    std::vector<const ITensorInfo *> inputs_vector_info_raw;
     if(!lstm_params.has_cifg_opt())
     {
         inputs_vector_info_raw.push_back(&input_gate);
@@ -623,7 +623,7 @@ void CLLSTMLayer::run()
 
     MemoryGroupResourceScope scope_mg(_memory_group);
 
-    CLScheduler::get().enqueue(_concat_inputs_forget_gate);
+    _concat_inputs_forget_gate.run();
 
     _fully_connected_forget_gate.run();
 
@@ -721,12 +721,12 @@ void CLLSTMLayer::prepare()
 {
     if(!_is_prepared)
     {
-        CLScheduler::get().enqueue(_concat_weights_forget_gate);
+        _concat_weights_forget_gate.run();
        if(!_run_cifg_opt)
        {
-            CLScheduler::get().enqueue(_concat_weights_input_gate);
+            _concat_weights_input_gate.run();
        }
-        CLScheduler::get().enqueue(_concat_weights_output);
+        _concat_weights_output.run();
         _is_prepared = true;
     }
 }
--
cgit v1.2.1
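
Usage sketch (illustrative, not part of the patch): the experimental::CLConcatenateLayer introduced above is configured on ITensorInfo objects only, and the backing CL tensors are injected at run time through the tensor maps, exactly as the public CLConcatenateLayer::run() shown in this diff does. The function name concat_on_x and the tensors src0, src1 and dst below are hypothetical; the sketch also assumes the CL backend has been initialised (e.g. via CLScheduler::get().default_init()) and the tensors allocated with compatible shapes.

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

using namespace arm_compute;

// Concatenate two pre-allocated CL tensors along the X axis with caller-provided
// (injected) memory. Illustrative only; src0, src1 and dst are assumed allocated.
void concat_on_x(CLTensor &src0, CLTensor &src1, CLTensor &dst)
{
    // Configure on tensor metadata only; the operator captures no ICLTensor.
    experimental::CLConcatenateLayer concat;
    concat.configure(CLKernelLibrary::get().get_compile_context(),
                     { src0.info(), src1.info() }, dst.info(), Window::DimX);

    // Hand the actual tensors to the operator at run time through the maps,
    // mirroring the public CLConcatenateLayer::run() in this patch.
    InputTensorMap  srcs{ { TensorType::ACL_SRC_VEC, &src0 }, { TensorType::ACL_SRC_VEC + 1, &src1 } };
    OutputTensorMap dsts{ { TensorType::ACL_DST, &dst } };
    concat.run(srcs, dsts, {});
}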