aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichele Di Giorgio <michele.digiorgio@arm.com>2018-04-10 14:24:35 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:49:37 +0000
commitfc1d1e2200f3056572b158b8208bac456f48339f (patch)
tree754c5ea55f2170afc2503d2fd67759c538660715
parenta72300a5e4d44cdadfe37f69e21f9bf628d19bb3 (diff)
downloadComputeLibrary-fc1d1e2200f3056572b158b8208bac456f48339f.tar.gz
COMPMID-959: Add FP32 support to GLES GEMMConvolution
The following kernels were supposed to have FP32 support but this was not the case because of bugs and missing shaders: - GCCol2Im - GCIm2Col - GCWeightsReshape Change-Id: Ie6ea464db0612757c71c3d40874e7bb0d60f170a Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127572 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs97
-rw-r--r--src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp22
-rw-r--r--src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp2
-rw-r--r--tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp22
4 files changed, 108 insertions, 35 deletions
diff --git a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
index ad3f14d442..40b5a2beb0 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
@@ -62,12 +62,12 @@ SHADER_PARAMS_DECLARATION
uint total_filters;
};
-#if defined(DATA_TYPE_FP16)
+#if defined(DATA_TYPE_FP32)
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
+TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
+TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
#ifdef HAS_BIAS
-TENSOR_DECLARATION(3, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
+TENSOR_DECLARATION(3, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
#endif /* HAS_BIAS */
void main()
@@ -86,6 +86,55 @@ void main()
// Linearize convolution elements
if(is_last_thread)
{
+ for(uint i = 0u; i < uint(total_filters); ++i)
+ {
+ float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
+ STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
+#ifdef HAS_BIAS
+ float b = LOAD_CURRENT_ITEM(biases_ptr, biases_iter);
+ STORE(dst_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_y), b);
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(biases_iter, biases_attrs.stride_x);
+#endif /* HAS_BIAS */
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
+ }
+ }
+ else
+ {
+ for(uint i = 0u; i < uint(total_filters); ++i)
+ {
+ float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
+ STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
+ }
+ }
+}
+
+#elif defined(DATA_TYPE_FP16)
+
+TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
+TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
+#ifdef HAS_BIAS
+TENSOR_DECLARATION(3, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
+#endif /* HAS_BIAS */
+
+void main()
+{
+ Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
+ ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
+#ifdef HAS_BIAS
+ VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
+#endif /* BIAS */
+
+ bool is_last_thread = (((int(gl_GlobalInvocationID.x)) == (int(gl_NumWorkGroups.x * gl_WorkGroupSize.x) - 1)) && ((int(gl_GlobalInvocationID.y)) == (int(gl_NumWorkGroups.y * gl_WorkGroupSize.y) - 1))
+ && ((int(gl_GlobalInvocationID.z)) == (int(gl_NumWorkGroups.z * gl_WorkGroupSize.z) - 1)));
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, ((uint(gl_GlobalInvocationID.x) * uint(dst_attrs.stride_y)) + (uint(gl_GlobalInvocationID.y) * uint(width) * uint(dst_attrs.stride_y)) + (uint(
+ gl_GlobalInvocationID.z)
+ * uint(width) * uint(height) * uint(dst_attrs.stride_y))));
+ // Linearize convolution elements
+ if(is_last_thread)
+ {
for(uint i = 0u; i < uint(total_filters); i = i + 2u)
{
vec2 s0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
@@ -151,7 +200,7 @@ void main()
}
}
-#endif /* DATA_TYPE_FP16 */
+#endif /* DATA_TYPE_FP32 */
#endif // RESHAPE_TO_COLUMNS
#ifdef IM2COL_GENERIC
@@ -193,30 +242,31 @@ void main(void)
Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
- uint xc = gl_GlobalInvocationID.x; // x coordinate in the convolved tensor
- uint yc = gl_GlobalInvocationID.y; // y coordinate in the convolved tensor
- uint ch = gl_GlobalInvocationID.z % KERNEL_DEPTH; // input feature map
- uint batch = gl_GlobalInvocationID.z / KERNEL_DEPTH; // the batch
+ int xc = int(gl_GlobalInvocationID.x); // x coordinate in the convolved tensor
+ int yc = int(gl_GlobalInvocationID.y); // y coordinate in the convolved tensor
+ int ch = int(gl_GlobalInvocationID.z) % KERNEL_DEPTH; // input feature map
+ int batch = int(gl_GlobalInvocationID.z) / KERNEL_DEPTH; // the batch
// Calculate input indices
- uint xi = xc * uint(STRIDE_X) - uint(PAD_LEFT);
- uint yi = yc * uint(STRIDE_Y) - uint(PAD_TOP);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (ch * src_attrs.stride_z) + (batch * src_stride_w));
+ int xi = xc * STRIDE_X - PAD_LEFT;
+ int yi = yc * STRIDE_Y - PAD_TOP;
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (ch * int(src_attrs.stride_z)) + (batch * int(src_stride_w)));
// Calculate output indices
- uint xo = ch * uint(KERNEL_WIDTH) * uint(KERNEL_HEIGHT);
- uint yo = xc + yc * uint(CONVOLVED_WIDTH); // Index of the convolution
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (yo * dst_attrs.stride_y) + (batch * dst_stride_w) + xo);
+ int xo = ch * KERNEL_WIDTH * KERNEL_HEIGHT;
+ int yo = xc + yc * CONVOLVED_WIDTH; // Index of the convolution
+ // sizeof is not available in GLES, so we'll use stride_x
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (yo * int(dst_attrs.stride_y)) + (batch * int(dst_stride_w)) + xo * int(dst_attrs.stride_x));
uint src_pos = 0u;
// Linearize convolution elements
- for(uint y = yi, y_e = yi + uint(KERNEL_HEIGHT) * uint(DILATION_Y); y < y_e; y += uint(DILATION_Y))
+ for(int y = yi, y_e = yi + KERNEL_HEIGHT * DILATION_Y; y < y_e; y += DILATION_Y)
{
- for(uint x = xi, x_e = xi + uint(KERNEL_WIDTH) * uint(DILATION_X); x < x_e; x += uint(DILATION_X), TENSOR_OFFSET_ADVANCE(dst_iter, 1u))
+ for(int x = xi, x_e = xi + KERNEL_WIDTH * DILATION_X; x < x_e; x += DILATION_X, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, int(dst_attrs.stride_x)))
{
#if PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
- src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.stride_x + y * src_attrs.stride_y);
+ src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
#else /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
@@ -225,7 +275,7 @@ void main(void)
}
else
{
- src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.stride_x + y * src_attrs.stride_y);
+ src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
}
#endif /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
@@ -233,7 +283,7 @@ void main(void)
}
#ifdef HAS_BIAS
- if(ch == (uint(KERNEL_DEPTH) - 1))
+ if(ch == (KERNEL_DEPTH - 1))
{
STORE_CURRENT_ITEM(dst_ptr, dst_iter, 1.0f);
}
@@ -661,6 +711,7 @@ void main(void)
#endif /* DATA_TYPE_FP32 */
#endif /* IM2COL_REDUCED */
+#ifdef COL2IM
#ifdef WIDTH_OUTPUT
/** This kernel performs a reshaping of the output of the convolution layer.
@@ -694,10 +745,9 @@ void main(void)
Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, pos.x * src_attrs.step_y + pos.y * WIDTH_OUTPUT * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * (src_attrs.stride_z));
+ TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, pos.x * src_attrs.step_y + pos.y * uint(WIDTH_OUTPUT) * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * dst_strideZ);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter,
- LOAD_CURRENT_ITEM(src_ptr, src_iter));
+ STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD_CURRENT_ITEM(src_ptr, src_iter));
}
#elif defined(DATA_TYPE_FP16)
@@ -737,4 +787,5 @@ void main(void)
#else /* DATA_TYPE_FP32 */
#error Data type not supported
#endif /* DATA_TYPE_FP32 */
+#endif /* WIDTH_OUTPUT */
#endif /* COL2IM */
diff --git a/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
index af1e34ef59..1554a89672 100644
--- a/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCCol2ImKernel.cpp
@@ -62,30 +62,32 @@ void GCCol2ImKernel::configure(const IGCTensor *input, IGCTensor *output,
_output = output;
_convolved_dims = convolved_dims;
- unsigned int num_elems_processed_per_iteration = 1;
+ const DataType dt = input->info()->data_type();
+ const unsigned int local_size = 1;
// Create kernel
std::set<std::string> build_opts;
+ build_opts.emplace("#define COL2IM ");
build_opts.emplace("#define WIDTH_OUTPUT " + support::cpp11::to_string(_convolved_dims.first));
- std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
+ const std::string dt_name = (dt == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
build_opts.emplace(("#define " + dt_name));
- build_opts.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(num_elems_processed_per_iteration));
- build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.emplace("#define LOCAL_SIZE_X " + support::cpp11::to_string(local_size));
+ build_opts.emplace("#define LOCAL_SIZE_Y " + support::cpp11::to_string(local_size));
+ build_opts.emplace("#define LOCAL_SIZE_Z " + support::cpp11::to_string(local_size));
_kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("col2im", build_opts));
// Configure window
- unsigned int nums = 2;
- Window win = calculate_max_window(*output->info(), Steps(nums));
+ const unsigned int num_elems_processed_per_iteration = (dt == DataType::F32) ? 1 : 2;
- AccessWindowHorizontal output_access(output->info(), 0, 2);
+ Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
const int input_padding = ceil_to_multiple(input->info()->dimension(0), 2) - input->info()->dimension(0);
AccessWindowStatic input_access(input->info(), 0, 0, input->info()->dimension(0) + input_padding, input->info()->dimension(1) + 1);
- update_window_and_padding(win, input_access,
- output_access);
+ update_window_and_padding(win, input_access, output_access);
output_access.set_valid_region(win, output->info()->valid_region());
diff --git a/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
index eb790471fb..6c896168ed 100644
--- a/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCIm2ColKernel.cpp
@@ -113,7 +113,7 @@ void GCIm2ColKernel::configure(const IGCTensor *input, IGCTensor *output, const
_convolved_dims = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1),
kernel_dims.width, kernel_dims.height,
conv_info, dilation);
- _num_elems_processed_per_iteration = 2;
+ _num_elems_processed_per_iteration = (input->info()->data_type() == DataType::F32) ? 1 : 2;
build_opts.emplace("#define KERNEL_WIDTH " + support::cpp11::to_string(kernel_dims.width));
build_opts.emplace("#define KERNEL_HEIGHT " + support::cpp11::to_string(kernel_dims.height));
diff --git a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
index 8808d82d34..a23c3ec4d7 100644
--- a/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/ConvolutionLayer.cpp
@@ -45,13 +45,14 @@ namespace validation
namespace
{
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+RelativeTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
DataType::F16,
- // DataType::F32,
+ DataType::F32,
});
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
@@ -135,6 +136,25 @@ FIXTURE_DATA_TEST_CASE(RunLarge, GCConvolutionLayerFixture<half>, framework::Dat
validate(GCAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, GCConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
+{
+ // Validate output
+ validate(GCAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, GCConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
+{
+ // Validate output
+ validate(GCAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END()
TEST_SUITE_END()
TEST_SUITE_END()