diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp    | 10 |
-rw-r--r-- | src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp |  6 |
-rw-r--r-- | src/core/GPUTarget.cpp                                     | 10 |
-rw-r--r-- | src/runtime/CL/functions/CLGEMM.cpp                        |  2 |
-rw-r--r-- | src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp  |  5 |
-rw-r--r-- | src/runtime/CL/tuners/BifrostTuner.cpp                     | 28 |
6 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp index 6de97d40af..c3d514adb4 100644 --- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp +++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp @@ -93,8 +93,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, inline bool can_run_optimized_kernel_for_bifrost(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size, DataType data_type, DataLayout data_layout) { - return gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) && (kernel_size <= 5) - && (conv_stride_x == 1) && (conv_stride_y == 1) && (data_type == DataType::F32) && (data_layout == DataLayout::NCHW); + return gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT) + && (kernel_size <= 5) + && (conv_stride_x == 1) && (conv_stride_y == 1) + && (data_type == DataType::F32) + && (data_layout == DataLayout::NCHW); } inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y, diff --git a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp index 2f1f1bf865..93332de9d1 100644 --- a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp @@ -52,7 +52,11 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *accum, ITen unsigned int &num_elems_processed_per_iteration) { // Select the vector size to use (8 for Bifrost; 16 for Midgard). 
- num_elems_processed_per_iteration = gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) ? 8 : 16; + bool is_gpu_bifrost = gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT); + num_elems_processed_per_iteration = is_gpu_bifrost ? 8 : 16; // Configure kernel window Window win = calculate_max_window(*accum, Steps(num_elems_processed_per_iteration)); diff --git a/src/core/GPUTarget.cpp b/src/core/GPUTarget.cpp index a14a9c9971..78e2df1599 100644 --- a/src/core/GPUTarget.cpp +++ b/src/core/GPUTarget.cpp @@ -51,6 +51,14 @@ arm_compute::GPUTarget get_bifrost_target(const std::string &version) { return arm_compute::GPUTarget::G51LIT; } + else if(version == "G52") + { + return arm_compute::GPUTarget::G52; + } + else if(version == "G52LIT") + { + return arm_compute::GPUTarget::G52LIT; + } else if(version == "G76") { return arm_compute::GPUTarget::G76; @@ -106,6 +114,8 @@ const std::string &string_from_target(GPUTarget target) { GPUTarget::G51, "g51" }, { GPUTarget::G51BIG, "g51big" }, { GPUTarget::G51LIT, "g51lit" }, + { GPUTarget::G52, "g52" }, + { GPUTarget::G52LIT, "g52lit" }, { GPUTarget::G76, "g76" }, { GPUTarget::TTRX, "ttrx" }, { GPUTarget::TBOX, "tbox" } diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp index 85d90a05e1..9dbfd3e153 100644 --- a/src/runtime/CL/functions/CLGEMM.cpp +++ b/src/runtime/CL/functions/CLGEMM.cpp @@ -44,7 +44,7 @@ inline bool is_interleaved_transposed(int m, int n, int k, DataType data_type, b { bool flag = true; - if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, GPUTarget::G52, GPUTarget::G52LIT, GPUTarget::G71, GPUTarget::G72, GPUTarget::G76)) { // COMPMID-852 if(k > 256 && m > 4 && is_data_type_float(data_type) && 
reshape_b_only_on_first_run) diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp index c2e18a760a..763ebced83 100644 --- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp @@ -41,7 +41,10 @@ inline bool is_interleaved_transposed(int m, int n, int k, bool reshape_b_only_o { bool flag = true; - if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT)) { // COMPMID-852 if(k > 256 && m > 4 && reshape_b_only_on_first_run) diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp index 59d73b4e79..187f52fcf7 100644 --- a/src/runtime/CL/tuners/BifrostTuner.cpp +++ b/src/runtime/CL/tuners/BifrostTuner.cpp @@ -132,7 +132,10 @@ void tune_col2im_kernel(CLCol2ImKernel &k) // Configure the local work size for Bifrost with a value obtained // via exhaustive autotuning over 30 representative tensor shapes. - if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT)) { if((k._convolved_dims.width == 7) || (k._convolved_dims.width == 14)) { @@ -153,7 +156,11 @@ void tune_im2col_kernel(CLIm2ColKernel &k) const GPUTarget gpu_target = k.get_target(); // Local work size optimized for the 11x11 AlexNet convolution on Bifrost. 
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) && k._kernel_dims.width == 11) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT) + && k._kernel_dims.width == 11) { const bool is_square_kernel = (k._kernel_dims.width == k._kernel_dims.height); if(!is_square_kernel && k._kernel_dims.width > 1 && !k._conv_info.has_padding()) @@ -171,7 +178,10 @@ void tune_depthwise_im2col_kernel(CLDepthwiseIm2ColKernel &k) // Configure the local work size for Bifrost with a value obtained // via exhaustive autotuning for the MobileNets tensor shapes. - if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT)) { lws_hint = cl::NDRange(1, 2, 1); } @@ -186,7 +196,10 @@ void tune_gemv_kernel(CLGEMMMatrixVectorMultiplyKernel &k) // Configure the local work size for Bifrost with a value obtained // via exhaustive autotuning for the MobileNets tensor shapes. 
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT)) { lws_hint = cl::NDRange(1, 1, 1); } @@ -207,6 +220,8 @@ void tune_gemm_kernel(CLGEMMMatrixMultiplyKernel &k) case GPUTarget::G51: case GPUTarget::G51BIG: case GPUTarget::G51LIT: + case GPUTarget::G52: + case GPUTarget::G52LIT: case GPUTarget::G76: if(k._input1->info()->dimension(1) == 24) { @@ -240,7 +255,10 @@ void tune_pooling_kernel(CLPoolingLayerKernel &k) // invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with). if(k._input->info()->data_layout() == DataLayout::NCHW) { - if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76)) + if(gpu_target_is_in(gpu_target, + GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, + GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, + GPUTarget::G52, GPUTarget::G52LIT)) { cl::NDRange gws = ICLKernel::gws_from_window(k.window()); lws_hint = cl::NDRange(gws[0], gws[1], 1); |