diff options
Diffstat (limited to 'src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp')
-rw-r--r-- | src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 36 |
1 file changed, 31 insertions, 5 deletions
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp index f43e100565..084c4df718 100644 --- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp @@ -90,8 +90,8 @@ void CLConvolutionLayerReshapeWeights::run() } CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) - : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _original_weights(nullptr), - _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true) + : _memory_group(memory_manager), _reshape_weights(), _im2col_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _col2im_kernel(), _activationlayer_function(), + _original_weights(nullptr), _im2col_output(), _weights_reshaped(), _gemm_output(), _tmp_output(), _is_quantized(false), _is_first_run(true), _is_activationlayer_enabled(false) { } @@ -152,7 +152,7 @@ Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens } void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info, - const Size2D &dilation) + const Size2D &dilation, const ActivationLayerInfo &act_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); @@ -162,7 +162,8 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor * output->info(), conv_info, weights_info, - dilation)); + dilation, + act_info)); _is_first_run = true; _original_weights = weights; @@ -260,11 +261,19 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor * // Allocate intermediate tensor _weights_reshaped.allocator()->allocate(); + 
//Configure Activation Layer + _is_activationlayer_enabled = act_info.enabled(); + + if(_is_activationlayer_enabled) + { + _activationlayer_function.configure(output, nullptr, act_info); + } + ARM_COMPUTE_UNUSED(weights_info); } Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - const WeightsInfo &weights_info, const Size2D &dilation) + const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!"); @@ -274,6 +283,11 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2)); ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4); + if(act_info.enabled()) + { + ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a()); + } + const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type()); const bool append_bias = (biases != nullptr) && (!is_quantized); const unsigned bias_element = (append_bias) ? 1 : 0; @@ -343,6 +357,12 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); } + //Validate Activation Layer + if(act_info.enabled()) + { + CLActivationLayer::validate(output, nullptr, act_info); + } + return Status{}; } @@ -383,5 +403,11 @@ void CLGEMMConvolutionLayer::run() // Reshape output matrix CLScheduler::get().enqueue(_col2im_kernel, false); + //Run Activation Layer if enabled + if(_is_activationlayer_enabled) + { + _activationlayer_function.run(); + } + _memory_group.release(); } |