author:    Anthony Barbier <anthony.barbier@arm.com>  2017-09-04 18:44:23 +0100
committer: Anthony Barbier <anthony.barbier@arm.com>  2018-09-17 13:03:09 +0100
commit:    6ff3b19ee6120edf015fad8caab2991faa3070af (patch)
tree:      a7a6dcd16dfd56d79fa1b56a313caeebcc939b68 /include
COMPMID-344 Updated doxygen
Change-Id: I32f7b84daa560e460b77216add529c8fa8b327ae
Diffstat (limited to 'include')
-rw-r--r--   include/CL/cl.h                    1214
-rw-r--r--   include/CL/cl.hpp                 12452
-rw-r--r--   include/CL/cl2.hpp                 9526
-rw-r--r--   include/CL/cl_d3d10.h               126
-rw-r--r--   include/CL/cl_d3d11.h               126
-rw-r--r--   include/CL/cl_dx9_media_sharing.h   127
-rw-r--r--   include/CL/cl_egl.h                 131
-rw-r--r--   include/CL/cl_ext.h                 316
-rw-r--r--   include/CL/cl_gl.h                  162
-rw-r--r--   include/CL/cl_gl_ext.h               69
-rw-r--r--   include/CL/cl_platform.h           1254
-rw-r--r--   include/CL/opencl.h                  54
12 files changed, 25557 insertions, 0 deletions
diff --git a/include/CL/cl.h b/include/CL/cl.h
new file mode 100644
index 0000000000..b41b1f9ff8
--- /dev/null
+++ b/include/CL/cl.h
@@ -0,0 +1,1214 @@
+/*******************************************************************************
+ * Copyright (c) 2008 - 2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+#ifndef __OPENCL_CL_H
+#define __OPENCL_CL_H
+
+#ifdef __APPLE__
+#include <OpenCL/cl_platform.h>
+#else
+#include <CL/cl_platform.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************/
+
+typedef struct _cl_platform_id * cl_platform_id;
+typedef struct _cl_device_id * cl_device_id;
+typedef struct _cl_context * cl_context;
+typedef struct _cl_command_queue * cl_command_queue;
+typedef struct _cl_mem * cl_mem;
+typedef struct _cl_program * cl_program;
+typedef struct _cl_kernel * cl_kernel;
+typedef struct _cl_event * cl_event;
+typedef struct _cl_sampler * cl_sampler;
+
+typedef cl_uint cl_bool; /* WARNING! Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. */
+typedef cl_ulong cl_bitfield;
+typedef cl_bitfield cl_device_type;
+typedef cl_uint cl_platform_info;
+typedef cl_uint cl_device_info;
+typedef cl_bitfield cl_device_fp_config;
+typedef cl_uint cl_device_mem_cache_type;
+typedef cl_uint cl_device_local_mem_type;
+typedef cl_bitfield cl_device_exec_capabilities;
+typedef cl_bitfield cl_command_queue_properties;
+typedef intptr_t cl_device_partition_property;
+typedef cl_bitfield cl_device_affinity_domain;
+
+typedef intptr_t cl_context_properties;
+typedef cl_uint cl_context_info;
+typedef cl_uint cl_command_queue_info;
+typedef cl_uint cl_channel_order;
+typedef cl_uint cl_channel_type;
+typedef cl_bitfield cl_mem_flags;
+typedef cl_uint cl_mem_object_type;
+typedef cl_uint cl_mem_info;
+typedef cl_bitfield cl_mem_migration_flags;
+typedef cl_uint cl_image_info;
+typedef cl_uint cl_buffer_create_type;
+typedef cl_uint cl_addressing_mode;
+typedef cl_uint cl_filter_mode;
+typedef cl_uint cl_sampler_info;
+typedef cl_bitfield cl_map_flags;
+typedef cl_uint cl_program_info;
+typedef cl_uint cl_program_build_info;
+typedef cl_uint cl_program_binary_type;
+typedef cl_int cl_build_status;
+typedef cl_uint cl_kernel_info;
+typedef cl_uint cl_kernel_arg_info;
+typedef cl_uint cl_kernel_arg_address_qualifier;
+typedef cl_uint cl_kernel_arg_access_qualifier;
+typedef cl_bitfield cl_kernel_arg_type_qualifier;
+typedef cl_uint cl_kernel_work_group_info;
+typedef cl_uint cl_event_info;
+typedef cl_uint cl_command_type;
+typedef cl_uint cl_profiling_info;
+
+
+typedef struct _cl_image_format {
+ cl_channel_order image_channel_order;
+ cl_channel_type image_channel_data_type;
+} cl_image_format;
+
+typedef struct _cl_image_desc {
+ cl_mem_object_type image_type;
+ size_t image_width;
+ size_t image_height;
+ size_t image_depth;
+ size_t image_array_size;
+ size_t image_row_pitch;
+ size_t image_slice_pitch;
+ cl_uint num_mip_levels;
+ cl_uint num_samples;
+ cl_mem buffer;
+} cl_image_desc;
+
+typedef struct _cl_buffer_region {
+ size_t origin;
+ size_t size;
+} cl_buffer_region;
+
+
+/******************************************************************************/
+
+/* Error Codes */
+#define CL_SUCCESS 0
+#define CL_DEVICE_NOT_FOUND -1
+#define CL_DEVICE_NOT_AVAILABLE -2
+#define CL_COMPILER_NOT_AVAILABLE -3
+#define CL_MEM_OBJECT_ALLOCATION_FAILURE -4
+#define CL_OUT_OF_RESOURCES -5
+#define CL_OUT_OF_HOST_MEMORY -6
+#define CL_PROFILING_INFO_NOT_AVAILABLE -7
+#define CL_MEM_COPY_OVERLAP -8
+#define CL_IMAGE_FORMAT_MISMATCH -9
+#define CL_IMAGE_FORMAT_NOT_SUPPORTED -10
+#define CL_BUILD_PROGRAM_FAILURE -11
+#define CL_MAP_FAILURE -12
+#define CL_MISALIGNED_SUB_BUFFER_OFFSET -13
+#define CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST -14
+#define CL_COMPILE_PROGRAM_FAILURE -15
+#define CL_LINKER_NOT_AVAILABLE -16
+#define CL_LINK_PROGRAM_FAILURE -17
+#define CL_DEVICE_PARTITION_FAILED -18
+#define CL_KERNEL_ARG_INFO_NOT_AVAILABLE -19
+
+#define CL_INVALID_VALUE -30
+#define CL_INVALID_DEVICE_TYPE -31
+#define CL_INVALID_PLATFORM -32
+#define CL_INVALID_DEVICE -33
+#define CL_INVALID_CONTEXT -34
+#define CL_INVALID_QUEUE_PROPERTIES -35
+#define CL_INVALID_COMMAND_QUEUE -36
+#define CL_INVALID_HOST_PTR -37
+#define CL_INVALID_MEM_OBJECT -38
+#define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR -39
+#define CL_INVALID_IMAGE_SIZE -40
+#define CL_INVALID_SAMPLER -41
+#define CL_INVALID_BINARY -42
+#define CL_INVALID_BUILD_OPTIONS -43
+#define CL_INVALID_PROGRAM -44
+#define CL_INVALID_PROGRAM_EXECUTABLE -45
+#define CL_INVALID_KERNEL_NAME -46
+#define CL_INVALID_KERNEL_DEFINITION -47
+#define CL_INVALID_KERNEL -48
+#define CL_INVALID_ARG_INDEX -49
+#define CL_INVALID_ARG_VALUE -50
+#define CL_INVALID_ARG_SIZE -51
+#define CL_INVALID_KERNEL_ARGS -52
+#define CL_INVALID_WORK_DIMENSION -53
+#define CL_INVALID_WORK_GROUP_SIZE -54
+#define CL_INVALID_WORK_ITEM_SIZE -55
+#define CL_INVALID_GLOBAL_OFFSET -56
+#define CL_INVALID_EVENT_WAIT_LIST -57
+#define CL_INVALID_EVENT -58
+#define CL_INVALID_OPERATION -59
+#define CL_INVALID_GL_OBJECT -60
+#define CL_INVALID_BUFFER_SIZE -61
+#define CL_INVALID_MIP_LEVEL -62
+#define CL_INVALID_GLOBAL_WORK_SIZE -63
+#define CL_INVALID_PROPERTY -64
+#define CL_INVALID_IMAGE_DESCRIPTOR -65
+#define CL_INVALID_COMPILER_OPTIONS -66
+#define CL_INVALID_LINKER_OPTIONS -67
+#define CL_INVALID_DEVICE_PARTITION_COUNT -68
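Every entry point declared later in this header reports failure through one of these negative codes, either as its return value or through an errcode_ret out-parameter. A minimal sketch of the resulting check-everything pattern (the check helper is illustrative, not part of the API):

```c
#include <stdio.h>
#include <CL/cl.h>

/* Tiny illustrative helper: report any non-CL_SUCCESS code and tell
 * the caller to bail out. */
static int check(cl_int err, const char *what)
{
    if (err != CL_SUCCESS) {
        fprintf(stderr, "%s failed: error %d\n", what, err);
        return 0;
    }
    return 1;
}

int main(void)
{
    cl_uint num_platforms = 0;
    if (!check(clGetPlatformIDs(0, NULL, &num_platforms), "clGetPlatformIDs"))
        return 1;
    printf("%u platform(s) available\n", num_platforms);
    return 0;
}
```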
+
+/* OpenCL Version */
+#define CL_VERSION_1_0 1
+#define CL_VERSION_1_1 1
+#define CL_VERSION_1_2 1
+
+/* cl_bool */
+#define CL_FALSE 0
+#define CL_TRUE 1
+#define CL_BLOCKING CL_TRUE
+#define CL_NON_BLOCKING CL_FALSE
+
+/* cl_platform_info */
+#define CL_PLATFORM_PROFILE 0x0900
+#define CL_PLATFORM_VERSION 0x0901
+#define CL_PLATFORM_NAME 0x0902
+#define CL_PLATFORM_VENDOR 0x0903
+#define CL_PLATFORM_EXTENSIONS 0x0904
+
+/* cl_device_type - bitfield */
+#define CL_DEVICE_TYPE_DEFAULT (1 << 0)
+#define CL_DEVICE_TYPE_CPU (1 << 1)
+#define CL_DEVICE_TYPE_GPU (1 << 2)
+#define CL_DEVICE_TYPE_ACCELERATOR (1 << 3)
+#define CL_DEVICE_TYPE_CUSTOM (1 << 4)
+#define CL_DEVICE_TYPE_ALL 0xFFFFFFFF
+
+/* cl_device_info */
+#define CL_DEVICE_TYPE 0x1000
+#define CL_DEVICE_VENDOR_ID 0x1001
+#define CL_DEVICE_MAX_COMPUTE_UNITS 0x1002
+#define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS 0x1003
+#define CL_DEVICE_MAX_WORK_GROUP_SIZE 0x1004
+#define CL_DEVICE_MAX_WORK_ITEM_SIZES 0x1005
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR 0x1006
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT 0x1007
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT 0x1008
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG 0x1009
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT 0x100A
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE 0x100B
+#define CL_DEVICE_MAX_CLOCK_FREQUENCY 0x100C
+#define CL_DEVICE_ADDRESS_BITS 0x100D
+#define CL_DEVICE_MAX_READ_IMAGE_ARGS 0x100E
+#define CL_DEVICE_MAX_WRITE_IMAGE_ARGS 0x100F
+#define CL_DEVICE_MAX_MEM_ALLOC_SIZE 0x1010
+#define CL_DEVICE_IMAGE2D_MAX_WIDTH 0x1011
+#define CL_DEVICE_IMAGE2D_MAX_HEIGHT 0x1012
+#define CL_DEVICE_IMAGE3D_MAX_WIDTH 0x1013
+#define CL_DEVICE_IMAGE3D_MAX_HEIGHT 0x1014
+#define CL_DEVICE_IMAGE3D_MAX_DEPTH 0x1015
+#define CL_DEVICE_IMAGE_SUPPORT 0x1016
+#define CL_DEVICE_MAX_PARAMETER_SIZE 0x1017
+#define CL_DEVICE_MAX_SAMPLERS 0x1018
+#define CL_DEVICE_MEM_BASE_ADDR_ALIGN 0x1019
+#define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE 0x101A
+#define CL_DEVICE_SINGLE_FP_CONFIG 0x101B
+#define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE 0x101C
+#define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE 0x101D
+#define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE 0x101E
+#define CL_DEVICE_GLOBAL_MEM_SIZE 0x101F
+#define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE 0x1020
+#define CL_DEVICE_MAX_CONSTANT_ARGS 0x1021
+#define CL_DEVICE_LOCAL_MEM_TYPE 0x1022
+#define CL_DEVICE_LOCAL_MEM_SIZE 0x1023
+#define CL_DEVICE_ERROR_CORRECTION_SUPPORT 0x1024
+#define CL_DEVICE_PROFILING_TIMER_RESOLUTION 0x1025
+#define CL_DEVICE_ENDIAN_LITTLE 0x1026
+#define CL_DEVICE_AVAILABLE 0x1027
+#define CL_DEVICE_COMPILER_AVAILABLE 0x1028
+#define CL_DEVICE_EXECUTION_CAPABILITIES 0x1029
+#define CL_DEVICE_QUEUE_PROPERTIES 0x102A
+#define CL_DEVICE_NAME 0x102B
+#define CL_DEVICE_VENDOR 0x102C
+#define CL_DRIVER_VERSION 0x102D
+#define CL_DEVICE_PROFILE 0x102E
+#define CL_DEVICE_VERSION 0x102F
+#define CL_DEVICE_EXTENSIONS 0x1030
+#define CL_DEVICE_PLATFORM 0x1031
+#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032
+/* 0x1033 reserved for CL_DEVICE_HALF_FP_CONFIG */
+#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF 0x1034
+#define CL_DEVICE_HOST_UNIFIED_MEMORY 0x1035
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR 0x1036
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT 0x1037
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_INT 0x1038
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG 0x1039
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT 0x103A
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE 0x103B
+#define CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF 0x103C
+#define CL_DEVICE_OPENCL_C_VERSION 0x103D
+#define CL_DEVICE_LINKER_AVAILABLE 0x103E
+#define CL_DEVICE_BUILT_IN_KERNELS 0x103F
+#define CL_DEVICE_IMAGE_MAX_BUFFER_SIZE 0x1040
+#define CL_DEVICE_IMAGE_MAX_ARRAY_SIZE 0x1041
+#define CL_DEVICE_PARENT_DEVICE 0x1042
+#define CL_DEVICE_PARTITION_MAX_SUB_DEVICES 0x1043
+#define CL_DEVICE_PARTITION_PROPERTIES 0x1044
+#define CL_DEVICE_PARTITION_AFFINITY_DOMAIN 0x1045
+#define CL_DEVICE_PARTITION_TYPE 0x1046
+#define CL_DEVICE_REFERENCE_COUNT 0x1047
+#define CL_DEVICE_PREFERRED_INTEROP_USER_SYNC 0x1048
+#define CL_DEVICE_PRINTF_BUFFER_SIZE 0x1049
+#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT 0x104A
+#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT 0x104B
+
+/* cl_device_fp_config - bitfield */
+#define CL_FP_DENORM (1 << 0)
+#define CL_FP_INF_NAN (1 << 1)
+#define CL_FP_ROUND_TO_NEAREST (1 << 2)
+#define CL_FP_ROUND_TO_ZERO (1 << 3)
+#define CL_FP_ROUND_TO_INF (1 << 4)
+#define CL_FP_FMA (1 << 5)
+#define CL_FP_SOFT_FLOAT (1 << 6)
+#define CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT (1 << 7)
+
+/* cl_device_mem_cache_type */
+#define CL_NONE 0x0
+#define CL_READ_ONLY_CACHE 0x1
+#define CL_READ_WRITE_CACHE 0x2
+
+/* cl_device_local_mem_type */
+#define CL_LOCAL 0x1
+#define CL_GLOBAL 0x2
+
+/* cl_device_exec_capabilities - bitfield */
+#define CL_EXEC_KERNEL (1 << 0)
+#define CL_EXEC_NATIVE_KERNEL (1 << 1)
+
+/* cl_command_queue_properties - bitfield */
+#define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE (1 << 0)
+#define CL_QUEUE_PROFILING_ENABLE (1 << 1)
+
+/* cl_context_info */
+#define CL_CONTEXT_REFERENCE_COUNT 0x1080
+#define CL_CONTEXT_DEVICES 0x1081
+#define CL_CONTEXT_PROPERTIES 0x1082
+#define CL_CONTEXT_NUM_DEVICES 0x1083
+
+/* cl_context_properties */
+#define CL_CONTEXT_PLATFORM 0x1084
+#define CL_CONTEXT_INTEROP_USER_SYNC 0x1085
+
+/* cl_device_partition_property */
+#define CL_DEVICE_PARTITION_EQUALLY 0x1086
+#define CL_DEVICE_PARTITION_BY_COUNTS 0x1087
+#define CL_DEVICE_PARTITION_BY_COUNTS_LIST_END 0x0
+#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN 0x1088
+
+/* cl_device_affinity_domain */
+#define CL_DEVICE_AFFINITY_DOMAIN_NUMA (1 << 0)
+#define CL_DEVICE_AFFINITY_DOMAIN_L4_CACHE (1 << 1)
+#define CL_DEVICE_AFFINITY_DOMAIN_L3_CACHE (1 << 2)
+#define CL_DEVICE_AFFINITY_DOMAIN_L2_CACHE (1 << 3)
+#define CL_DEVICE_AFFINITY_DOMAIN_L1_CACHE (1 << 4)
+#define CL_DEVICE_AFFINITY_DOMAIN_NEXT_PARTITIONABLE (1 << 5)
+
+/* cl_command_queue_info */
+#define CL_QUEUE_CONTEXT 0x1090
+#define CL_QUEUE_DEVICE 0x1091
+#define CL_QUEUE_REFERENCE_COUNT 0x1092
+#define CL_QUEUE_PROPERTIES 0x1093
+
+/* cl_mem_flags - bitfield */
+#define CL_MEM_READ_WRITE (1 << 0)
+#define CL_MEM_WRITE_ONLY (1 << 1)
+#define CL_MEM_READ_ONLY (1 << 2)
+#define CL_MEM_USE_HOST_PTR (1 << 3)
+#define CL_MEM_ALLOC_HOST_PTR (1 << 4)
+#define CL_MEM_COPY_HOST_PTR (1 << 5)
+/* reserved (1 << 6)*/
+#define CL_MEM_HOST_WRITE_ONLY (1 << 7)
+#define CL_MEM_HOST_READ_ONLY (1 << 8)
+#define CL_MEM_HOST_NO_ACCESS (1 << 9)
+
+/* cl_mem_migration_flags - bitfield */
+#define CL_MIGRATE_MEM_OBJECT_HOST (1 << 0)
+#define CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED (1 << 1)
+
+/* cl_channel_order */
+#define CL_R 0x10B0
+#define CL_A 0x10B1
+#define CL_RG 0x10B2
+#define CL_RA 0x10B3
+#define CL_RGB 0x10B4
+#define CL_RGBA 0x10B5
+#define CL_BGRA 0x10B6
+#define CL_ARGB 0x10B7
+#define CL_INTENSITY 0x10B8
+#define CL_LUMINANCE 0x10B9
+#define CL_Rx 0x10BA
+#define CL_RGx 0x10BB
+#define CL_RGBx 0x10BC
+#define CL_DEPTH 0x10BD
+#define CL_DEPTH_STENCIL 0x10BE
+
+/* cl_channel_type */
+#define CL_SNORM_INT8 0x10D0
+#define CL_SNORM_INT16 0x10D1
+#define CL_UNORM_INT8 0x10D2
+#define CL_UNORM_INT16 0x10D3
+#define CL_UNORM_SHORT_565 0x10D4
+#define CL_UNORM_SHORT_555 0x10D5
+#define CL_UNORM_INT_101010 0x10D6
+#define CL_SIGNED_INT8 0x10D7
+#define CL_SIGNED_INT16 0x10D8
+#define CL_SIGNED_INT32 0x10D9
+#define CL_UNSIGNED_INT8 0x10DA
+#define CL_UNSIGNED_INT16 0x10DB
+#define CL_UNSIGNED_INT32 0x10DC
+#define CL_HALF_FLOAT 0x10DD
+#define CL_FLOAT 0x10DE
+#define CL_UNORM_INT24 0x10DF
+
+/* cl_mem_object_type */
+#define CL_MEM_OBJECT_BUFFER 0x10F0
+#define CL_MEM_OBJECT_IMAGE2D 0x10F1
+#define CL_MEM_OBJECT_IMAGE3D 0x10F2
+#define CL_MEM_OBJECT_IMAGE2D_ARRAY 0x10F3
+#define CL_MEM_OBJECT_IMAGE1D 0x10F4
+#define CL_MEM_OBJECT_IMAGE1D_ARRAY 0x10F5
+#define CL_MEM_OBJECT_IMAGE1D_BUFFER 0x10F6
+
+/* cl_mem_info */
+#define CL_MEM_TYPE 0x1100
+#define CL_MEM_FLAGS 0x1101
+#define CL_MEM_SIZE 0x1102
+#define CL_MEM_HOST_PTR 0x1103
+#define CL_MEM_MAP_COUNT 0x1104
+#define CL_MEM_REFERENCE_COUNT 0x1105
+#define CL_MEM_CONTEXT 0x1106
+#define CL_MEM_ASSOCIATED_MEMOBJECT 0x1107
+#define CL_MEM_OFFSET 0x1108
+
+/* cl_image_info */
+#define CL_IMAGE_FORMAT 0x1110
+#define CL_IMAGE_ELEMENT_SIZE 0x1111
+#define CL_IMAGE_ROW_PITCH 0x1112
+#define CL_IMAGE_SLICE_PITCH 0x1113
+#define CL_IMAGE_WIDTH 0x1114
+#define CL_IMAGE_HEIGHT 0x1115
+#define CL_IMAGE_DEPTH 0x1116
+#define CL_IMAGE_ARRAY_SIZE 0x1117
+#define CL_IMAGE_BUFFER 0x1118
+#define CL_IMAGE_NUM_MIP_LEVELS 0x1119
+#define CL_IMAGE_NUM_SAMPLES 0x111A
+
+/* cl_addressing_mode */
+#define CL_ADDRESS_NONE 0x1130
+#define CL_ADDRESS_CLAMP_TO_EDGE 0x1131
+#define CL_ADDRESS_CLAMP 0x1132
+#define CL_ADDRESS_REPEAT 0x1133
+#define CL_ADDRESS_MIRRORED_REPEAT 0x1134
+
+/* cl_filter_mode */
+#define CL_FILTER_NEAREST 0x1140
+#define CL_FILTER_LINEAR 0x1141
+
+/* cl_sampler_info */
+#define CL_SAMPLER_REFERENCE_COUNT 0x1150
+#define CL_SAMPLER_CONTEXT 0x1151
+#define CL_SAMPLER_NORMALIZED_COORDS 0x1152
+#define CL_SAMPLER_ADDRESSING_MODE 0x1153
+#define CL_SAMPLER_FILTER_MODE 0x1154
+
+/* cl_map_flags - bitfield */
+#define CL_MAP_READ (1 << 0)
+#define CL_MAP_WRITE (1 << 1)
+#define CL_MAP_WRITE_INVALIDATE_REGION (1 << 2)
+
+/* cl_program_info */
+#define CL_PROGRAM_REFERENCE_COUNT 0x1160
+#define CL_PROGRAM_CONTEXT 0x1161
+#define CL_PROGRAM_NUM_DEVICES 0x1162
+#define CL_PROGRAM_DEVICES 0x1163
+#define CL_PROGRAM_SOURCE 0x1164
+#define CL_PROGRAM_BINARY_SIZES 0x1165
+#define CL_PROGRAM_BINARIES 0x1166
+#define CL_PROGRAM_NUM_KERNELS 0x1167
+#define CL_PROGRAM_KERNEL_NAMES 0x1168
+
+/* cl_program_build_info */
+#define CL_PROGRAM_BUILD_STATUS 0x1181
+#define CL_PROGRAM_BUILD_OPTIONS 0x1182
+#define CL_PROGRAM_BUILD_LOG 0x1183
+#define CL_PROGRAM_BINARY_TYPE 0x1184
+
+/* cl_program_binary_type */
+#define CL_PROGRAM_BINARY_TYPE_NONE 0x0
+#define CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT 0x1
+#define CL_PROGRAM_BINARY_TYPE_LIBRARY 0x2
+#define CL_PROGRAM_BINARY_TYPE_EXECUTABLE 0x4
+
+/* cl_build_status */
+#define CL_BUILD_SUCCESS 0
+#define CL_BUILD_NONE -1
+#define CL_BUILD_ERROR -2
+#define CL_BUILD_IN_PROGRESS -3
+
+/* cl_kernel_info */
+#define CL_KERNEL_FUNCTION_NAME 0x1190
+#define CL_KERNEL_NUM_ARGS 0x1191
+#define CL_KERNEL_REFERENCE_COUNT 0x1192
+#define CL_KERNEL_CONTEXT 0x1193
+#define CL_KERNEL_PROGRAM 0x1194
+#define CL_KERNEL_ATTRIBUTES 0x1195
+
+/* cl_kernel_arg_info */
+#define CL_KERNEL_ARG_ADDRESS_QUALIFIER 0x1196
+#define CL_KERNEL_ARG_ACCESS_QUALIFIER 0x1197
+#define CL_KERNEL_ARG_TYPE_NAME 0x1198
+#define CL_KERNEL_ARG_TYPE_QUALIFIER 0x1199
+#define CL_KERNEL_ARG_NAME 0x119A
+
+/* cl_kernel_arg_address_qualifier */
+#define CL_KERNEL_ARG_ADDRESS_GLOBAL 0x119B
+#define CL_KERNEL_ARG_ADDRESS_LOCAL 0x119C
+#define CL_KERNEL_ARG_ADDRESS_CONSTANT 0x119D
+#define CL_KERNEL_ARG_ADDRESS_PRIVATE 0x119E
+
+/* cl_kernel_arg_access_qualifier */
+#define CL_KERNEL_ARG_ACCESS_READ_ONLY 0x11A0
+#define CL_KERNEL_ARG_ACCESS_WRITE_ONLY 0x11A1
+#define CL_KERNEL_ARG_ACCESS_READ_WRITE 0x11A2
+#define CL_KERNEL_ARG_ACCESS_NONE 0x11A3
+
+/* cl_kernel_arg_type_qualifier */
+#define CL_KERNEL_ARG_TYPE_NONE 0
+#define CL_KERNEL_ARG_TYPE_CONST (1 << 0)
+#define CL_KERNEL_ARG_TYPE_RESTRICT (1 << 1)
+#define CL_KERNEL_ARG_TYPE_VOLATILE (1 << 2)
+
+/* cl_kernel_work_group_info */
+#define CL_KERNEL_WORK_GROUP_SIZE 0x11B0
+#define CL_KERNEL_COMPILE_WORK_GROUP_SIZE 0x11B1
+#define CL_KERNEL_LOCAL_MEM_SIZE 0x11B2
+#define CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x11B3
+#define CL_KERNEL_PRIVATE_MEM_SIZE 0x11B4
+#define CL_KERNEL_GLOBAL_WORK_SIZE 0x11B5
+
+/* cl_event_info */
+#define CL_EVENT_COMMAND_QUEUE 0x11D0
+#define CL_EVENT_COMMAND_TYPE 0x11D1
+#define CL_EVENT_REFERENCE_COUNT 0x11D2
+#define CL_EVENT_COMMAND_EXECUTION_STATUS 0x11D3
+#define CL_EVENT_CONTEXT 0x11D4
+
+/* cl_command_type */
+#define CL_COMMAND_NDRANGE_KERNEL 0x11F0
+#define CL_COMMAND_TASK 0x11F1
+#define CL_COMMAND_NATIVE_KERNEL 0x11F2
+#define CL_COMMAND_READ_BUFFER 0x11F3
+#define CL_COMMAND_WRITE_BUFFER 0x11F4
+#define CL_COMMAND_COPY_BUFFER 0x11F5
+#define CL_COMMAND_READ_IMAGE 0x11F6
+#define CL_COMMAND_WRITE_IMAGE 0x11F7
+#define CL_COMMAND_COPY_IMAGE 0x11F8
+#define CL_COMMAND_COPY_IMAGE_TO_BUFFER 0x11F9
+#define CL_COMMAND_COPY_BUFFER_TO_IMAGE 0x11FA
+#define CL_COMMAND_MAP_BUFFER 0x11FB
+#define CL_COMMAND_MAP_IMAGE 0x11FC
+#define CL_COMMAND_UNMAP_MEM_OBJECT 0x11FD
+#define CL_COMMAND_MARKER 0x11FE
+#define CL_COMMAND_ACQUIRE_GL_OBJECTS 0x11FF
+#define CL_COMMAND_RELEASE_GL_OBJECTS 0x1200
+#define CL_COMMAND_READ_BUFFER_RECT 0x1201
+#define CL_COMMAND_WRITE_BUFFER_RECT 0x1202
+#define CL_COMMAND_COPY_BUFFER_RECT 0x1203
+#define CL_COMMAND_USER 0x1204
+#define CL_COMMAND_BARRIER 0x1205
+#define CL_COMMAND_MIGRATE_MEM_OBJECTS 0x1206
+#define CL_COMMAND_FILL_BUFFER 0x1207
+#define CL_COMMAND_FILL_IMAGE 0x1208
+
+/* command execution status */
+#define CL_COMPLETE 0x0
+#define CL_RUNNING 0x1
+#define CL_SUBMITTED 0x2
+#define CL_QUEUED 0x3
+
+/* cl_buffer_create_type */
+#define CL_BUFFER_CREATE_TYPE_REGION 0x1220
+
+/* cl_profiling_info */
+#define CL_PROFILING_COMMAND_QUEUED 0x1280
+#define CL_PROFILING_COMMAND_SUBMIT 0x1281
+#define CL_PROFILING_COMMAND_START 0x1282
+#define CL_PROFILING_COMMAND_END 0x1283
+
+/********************************************************************************************************/
+
+/* Platform API */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetPlatformIDs(cl_uint /* num_entries */,
+ cl_platform_id * /* platforms */,
+ cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetPlatformInfo(cl_platform_id /* platform */,
+ cl_platform_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
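Both calls follow the two-call query idiom used throughout this API: pass 0/NULL to learn the required count or size, then call again with storage. A minimal sketch, assuming at least one platform is installed:

```c
#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>

int main(void)
{
    /* First call: NULL storage, just learn how many platforms exist. */
    cl_uint n = 0;
    if (clGetPlatformIDs(0, NULL, &n) != CL_SUCCESS || n == 0)
        return 1;

    cl_platform_id *platforms = malloc(n * sizeof *platforms);
    clGetPlatformIDs(n, platforms, NULL);

    /* Same idiom for string properties: query the size, then the data. */
    size_t size = 0;
    clGetPlatformInfo(platforms[0], CL_PLATFORM_NAME, 0, NULL, &size);
    char *name = malloc(size);
    clGetPlatformInfo(platforms[0], CL_PLATFORM_NAME, size, name, NULL);

    printf("first platform: %s\n", name);
    free(name);
    free(platforms);
    return 0;
}
```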
+
+/* Device APIs */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetDeviceIDs(cl_platform_id /* platform */,
+ cl_device_type /* device_type */,
+ cl_uint /* num_entries */,
+ cl_device_id * /* devices */,
+ cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetDeviceInfo(cl_device_id /* device */,
+ cl_device_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clCreateSubDevices(cl_device_id /* in_device */,
+ const cl_device_partition_property * /* properties */,
+ cl_uint /* num_devices */,
+ cl_device_id * /* out_devices */,
+ cl_uint * /* num_devices_ret */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;
+
+/* Context APIs */
+extern CL_API_ENTRY cl_context CL_API_CALL
+clCreateContext(const cl_context_properties * /* properties */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* devices */,
+ void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *),
+ void * /* user_data */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_context CL_API_CALL
+clCreateContextFromType(const cl_context_properties * /* properties */,
+ cl_device_type /* device_type */,
+ void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *),
+ void * /* user_data */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetContextInfo(cl_context /* context */,
+ cl_context_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
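A minimal context-creation sketch, assuming a platform with at least one device; the pfn_notify callback is optional and may be invoked asynchronously by the runtime to report context errors:

```c
#include <stdio.h>
#include <CL/cl.h>

/* Illustrative error-notification callback. */
static void CL_CALLBACK notify(const char *errinfo, const void *private_info,
                               size_t cb, void *user_data)
{
    (void)private_info; (void)cb; (void)user_data;
    fprintf(stderr, "OpenCL context error: %s\n", errinfo);
}

int main(void)
{
    cl_platform_id platform;
    cl_device_id device;
    cl_int err;

    clGetPlatformIDs(1, &platform, NULL);
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_DEFAULT, 1, &device, NULL);

    /* Properties are a zero-terminated list of (key, value) pairs. */
    cl_context_properties props[] = {
        CL_CONTEXT_PLATFORM, (cl_context_properties)platform, 0
    };
    cl_context ctx = clCreateContext(props, 1, &device, notify, NULL, &err);
    if (err != CL_SUCCESS)
        return 1;

    clReleaseContext(ctx);
    return 0;
}
```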
+
+/* Command Queue APIs */
+extern CL_API_ENTRY cl_command_queue CL_API_CALL
+clCreateCommandQueue(cl_context /* context */,
+ cl_device_id /* device */,
+ cl_command_queue_properties /* properties */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetCommandQueueInfo(cl_command_queue /* command_queue */,
+ cl_command_queue_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+/* Memory Object APIs */
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateBuffer(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ size_t /* size */,
+ void * /* host_ptr */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateSubBuffer(cl_mem /* buffer */,
+ cl_mem_flags /* flags */,
+ cl_buffer_create_type /* buffer_create_type */,
+ const void * /* buffer_create_info */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1;
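clCreateSubBuffer is driven by the cl_buffer_region struct defined earlier in this header. A sketch, assuming `ctx` is a valid context; the origin must respect the device's CL_DEVICE_MEM_BASE_ADDR_ALIGN or the call fails with CL_MISALIGNED_SUB_BUFFER_OFFSET:

```c
/* Assumed in scope: a valid cl_context `ctx`. */
cl_int err;
cl_mem parent = clCreateBuffer(ctx, CL_MEM_READ_WRITE, 4096, NULL, &err);

cl_buffer_region region = { 1024, 2048 };   /* origin and size, in bytes */
cl_mem sub = clCreateSubBuffer(parent, CL_MEM_READ_WRITE,
                               CL_BUFFER_CREATE_TYPE_REGION, &region, &err);

/* `sub` now aliases bytes [1024, 3072) of `parent`. */
clReleaseMemObject(sub);
clReleaseMemObject(parent);
```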
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateImage(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ const cl_image_format * /* image_format */,
+ const cl_image_desc * /* image_desc */,
+ void * /* host_ptr */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
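Image creation takes a cl_image_format plus a cl_image_desc (both defined above); unused descriptor fields must be zero, hence the zero-initializer. A sketch, assuming `ctx` is a valid context on a device reporting CL_DEVICE_IMAGE_SUPPORT:

```c
/* Assumed in scope: a valid cl_context `ctx`. */
cl_int err;
cl_image_format fmt = { CL_RGBA, CL_UNORM_INT8 };   /* RGBA, 8-bit normalized */

cl_image_desc desc = { 0 };                /* zero all unused fields */
desc.image_type   = CL_MEM_OBJECT_IMAGE2D;
desc.image_width  = 640;
desc.image_height = 480;

cl_mem img = clCreateImage(ctx, CL_MEM_READ_ONLY, &fmt, &desc, NULL, &err);
if (err == CL_SUCCESS)
    clReleaseMemObject(img);
```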
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetSupportedImageFormats(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_mem_object_type /* image_type */,
+ cl_uint /* num_entries */,
+ cl_image_format * /* image_formats */,
+ cl_uint * /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetMemObjectInfo(cl_mem /* memobj */,
+ cl_mem_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetImageInfo(cl_mem /* image */,
+ cl_image_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clSetMemObjectDestructorCallback( cl_mem /* memobj */,
+ void (CL_CALLBACK * /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
+ void * /*user_data */ ) CL_API_SUFFIX__VERSION_1_1;
+
+/* Sampler APIs */
+extern CL_API_ENTRY cl_sampler CL_API_CALL
+clCreateSampler(cl_context /* context */,
+ cl_bool /* normalized_coords */,
+ cl_addressing_mode /* addressing_mode */,
+ cl_filter_mode /* filter_mode */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetSamplerInfo(cl_sampler /* sampler */,
+ cl_sampler_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+/* Program Object APIs */
+extern CL_API_ENTRY cl_program CL_API_CALL
+clCreateProgramWithSource(cl_context /* context */,
+ cl_uint /* count */,
+ const char ** /* strings */,
+ const size_t * /* lengths */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_program CL_API_CALL
+clCreateProgramWithBinary(cl_context /* context */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* device_list */,
+ const size_t * /* lengths */,
+ const unsigned char ** /* binaries */,
+ cl_int * /* binary_status */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_program CL_API_CALL
+clCreateProgramWithBuiltInKernels(cl_context /* context */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* device_list */,
+ const char * /* kernel_names */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clBuildProgram(cl_program /* program */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* device_list */,
+ const char * /* options */,
+ void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
+ void * /* user_data */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clCompileProgram(cl_program /* program */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* device_list */,
+ const char * /* options */,
+ cl_uint /* num_input_headers */,
+ const cl_program * /* input_headers */,
+ const char ** /* header_include_names */,
+ void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
+ void * /* user_data */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_program CL_API_CALL
+clLinkProgram(cl_context /* context */,
+ cl_uint /* num_devices */,
+ const cl_device_id * /* device_list */,
+ const char * /* options */,
+ cl_uint /* num_input_programs */,
+ const cl_program * /* input_programs */,
+ void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
+ void * /* user_data */,
+ cl_int * /* errcode_ret */ ) CL_API_SUFFIX__VERSION_1_2;
+
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clUnloadPlatformCompiler(cl_platform_id /* platform */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetProgramInfo(cl_program /* program */,
+ cl_program_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetProgramBuildInfo(cl_program /* program */,
+ cl_device_id /* device */,
+ cl_program_build_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
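The usual compile flow pairs clBuildProgram with a CL_PROGRAM_BUILD_LOG query on failure, again via the size-then-data idiom. A sketch, assuming `ctx`, `device`, and a `src` pointer to OpenCL C source are in scope:

```c
/* Assumed in scope: cl_context ctx, cl_device_id device,
 * const char *src pointing at OpenCL C source text. */
cl_int err;
cl_program prog = clCreateProgramWithSource(ctx, 1, &src, NULL, &err);

err = clBuildProgram(prog, 1, &device, "-cl-std=CL1.2", NULL, NULL);
if (err == CL_BUILD_PROGRAM_FAILURE) {
    size_t log_size = 0;
    clGetProgramBuildInfo(prog, device, CL_PROGRAM_BUILD_LOG,
                          0, NULL, &log_size);
    char *log = malloc(log_size);
    clGetProgramBuildInfo(prog, device, CL_PROGRAM_BUILD_LOG,
                          log_size, log, NULL);
    fprintf(stderr, "build log:\n%s\n", log);
    free(log);
}
```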
+
+/* Kernel Object APIs */
+extern CL_API_ENTRY cl_kernel CL_API_CALL
+clCreateKernel(cl_program /* program */,
+ const char * /* kernel_name */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clCreateKernelsInProgram(cl_program /* program */,
+ cl_uint /* num_kernels */,
+ cl_kernel * /* kernels */,
+ cl_uint * /* num_kernels_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clSetKernelArg(cl_kernel /* kernel */,
+ cl_uint /* arg_index */,
+ size_t /* arg_size */,
+ const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0;
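Arguments are set one at a time by index. For buffer and image arguments arg_size is sizeof(cl_mem); for __local arguments you pass the byte count with a NULL value. A sketch against a hypothetical kernel signature:

```c
/* Hypothetical kernel, for illustration only:
 *   __kernel void scale(__global float *data, float factor,
 *                       __local float *tmp)
 * Assumed in scope: cl_kernel kernel, cl_mem data. */
float factor = 2.0f;

clSetKernelArg(kernel, 0, sizeof(cl_mem), &data);       /* buffer object */
clSetKernelArg(kernel, 1, sizeof(float), &factor);      /* scalar by value */
clSetKernelArg(kernel, 2, 256 * sizeof(float), NULL);   /* __local scratch */
```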
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetKernelInfo(cl_kernel /* kernel */,
+ cl_kernel_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetKernelArgInfo(cl_kernel /* kernel */,
+ cl_uint /* arg_indx */,
+ cl_kernel_arg_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetKernelWorkGroupInfo(cl_kernel /* kernel */,
+ cl_device_id /* device */,
+ cl_kernel_work_group_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+/* Event Object APIs */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clWaitForEvents(cl_uint /* num_events */,
+ const cl_event * /* event_list */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetEventInfo(cl_event /* event */,
+ cl_event_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_event CL_API_CALL
+clCreateUserEvent(cl_context /* context */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clRetainEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clReleaseEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clSetUserEventStatus(cl_event /* event */,
+ cl_int /* execution_status */) CL_API_SUFFIX__VERSION_1_1;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clSetEventCallback( cl_event /* event */,
+ cl_int /* command_exec_callback_type */,
+ void (CL_CALLBACK * /* pfn_notify */)(cl_event, cl_int, void *),
+ void * /* user_data */) CL_API_SUFFIX__VERSION_1_1;
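User events let the host gate enqueued commands, and clSetEventCallback fires when an event reaches the requested execution status. A sketch, assuming `ctx` is a valid context:

```c
/* Invoked by the runtime when the event reaches CL_COMPLETE (or an
 * error, passed as a negative status). */
static void CL_CALLBACK on_complete(cl_event e, cl_int status, void *user_data)
{
    (void)e; (void)user_data;
    printf("event finished with status %d\n", status);
}

/* Elsewhere, with a valid cl_context `ctx` in scope: */
cl_int err;
cl_event ev = clCreateUserEvent(ctx, &err);
clSetEventCallback(ev, CL_COMPLETE, on_complete, NULL);

/* ... enqueue commands listing `ev` in their wait lists ... */

clSetUserEventStatus(ev, CL_COMPLETE);   /* releases the gated commands */
clReleaseEvent(ev);
```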
+
+/* Profiling APIs */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetEventProfilingInfo(cl_event /* event */,
+ cl_profiling_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
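The four profiling timestamps are 64-bit device-clock values in nanoseconds, available only for events from queues created with CL_QUEUE_PROFILING_ENABLE and only once the event is complete. A sketch, assuming `ev` is such an event that has already been waited on:

```c
/* Assumed in scope: cl_event ev from a profiling-enabled queue. */
cl_ulong start = 0, end = 0;
clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_START,
                        sizeof(start), &start, NULL);
clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_END,
                        sizeof(end), &end, NULL);
printf("kernel took %.3f ms\n", (end - start) * 1e-6);
```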
+
+/* Flush and Finish APIs */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clFlush(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clFinish(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
+
+/* Enqueued Commands APIs */
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReadBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ cl_bool /* blocking_read */,
+ size_t /* offset */,
+ size_t /* size */,
+ void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReadBufferRect(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ cl_bool /* blocking_read */,
+ const size_t * /* buffer_offset */,
+ const size_t * /* host_offset */,
+ const size_t * /* region */,
+ size_t /* buffer_row_pitch */,
+ size_t /* buffer_slice_pitch */,
+ size_t /* host_row_pitch */,
+ size_t /* host_slice_pitch */,
+ void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueWriteBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ cl_bool /* blocking_write */,
+ size_t /* offset */,
+ size_t /* size */,
+ const void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueWriteBufferRect(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ cl_bool /* blocking_write */,
+ const size_t * /* buffer_offset */,
+ const size_t * /* host_offset */,
+ const size_t * /* region */,
+ size_t /* buffer_row_pitch */,
+ size_t /* buffer_slice_pitch */,
+ size_t /* host_row_pitch */,
+ size_t /* host_slice_pitch */,
+ const void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueFillBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ const void * /* pattern */,
+ size_t /* pattern_size */,
+ size_t /* offset */,
+ size_t /* size */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueCopyBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* src_buffer */,
+ cl_mem /* dst_buffer */,
+ size_t /* src_offset */,
+ size_t /* dst_offset */,
+ size_t /* size */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueCopyBufferRect(cl_command_queue /* command_queue */,
+ cl_mem /* src_buffer */,
+ cl_mem /* dst_buffer */,
+ const size_t * /* src_origin */,
+ const size_t * /* dst_origin */,
+ const size_t * /* region */,
+ size_t /* src_row_pitch */,
+ size_t /* src_slice_pitch */,
+ size_t /* dst_row_pitch */,
+ size_t /* dst_slice_pitch */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReadImage(cl_command_queue /* command_queue */,
+ cl_mem /* image */,
+ cl_bool /* blocking_read */,
+ const size_t * /* origin[3] */,
+ const size_t * /* region[3] */,
+ size_t /* row_pitch */,
+ size_t /* slice_pitch */,
+ void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueWriteImage(cl_command_queue /* command_queue */,
+ cl_mem /* image */,
+ cl_bool /* blocking_write */,
+ const size_t * /* origin[3] */,
+ const size_t * /* region[3] */,
+ size_t /* input_row_pitch */,
+ size_t /* input_slice_pitch */,
+ const void * /* ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueFillImage(cl_command_queue /* command_queue */,
+ cl_mem /* image */,
+ const void * /* fill_color */,
+ const size_t * /* origin[3] */,
+ const size_t * /* region[3] */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueCopyImage(cl_command_queue /* command_queue */,
+ cl_mem /* src_image */,
+ cl_mem /* dst_image */,
+ const size_t * /* src_origin[3] */,
+ const size_t * /* dst_origin[3] */,
+ const size_t * /* region[3] */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* src_image */,
+ cl_mem /* dst_buffer */,
+ const size_t * /* src_origin[3] */,
+ const size_t * /* region[3] */,
+ size_t /* dst_offset */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueCopyBufferToImage(cl_command_queue /* command_queue */,
+ cl_mem /* src_buffer */,
+ cl_mem /* dst_image */,
+ size_t /* src_offset */,
+ const size_t * /* dst_origin[3] */,
+ const size_t * /* region[3] */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY void * CL_API_CALL
+clEnqueueMapBuffer(cl_command_queue /* command_queue */,
+ cl_mem /* buffer */,
+ cl_bool /* blocking_map */,
+ cl_map_flags /* map_flags */,
+ size_t /* offset */,
+ size_t /* size */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY void * CL_API_CALL
+clEnqueueMapImage(cl_command_queue /* command_queue */,
+ cl_mem /* image */,
+ cl_bool /* blocking_map */,
+ cl_map_flags /* map_flags */,
+ const size_t * /* origin[3] */,
+ const size_t * /* region[3] */,
+ size_t * /* image_row_pitch */,
+ size_t * /* image_slice_pitch */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueUnmapMemObject(cl_command_queue /* command_queue */,
+ cl_mem /* memobj */,
+ void * /* mapped_ptr */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
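Map/unmap brackets direct host access to a buffer without an explicit read or write copy; a blocking map (CL_TRUE) returns only once the pointer is safe to dereference. A sketch, assuming `queue` and `buf` are valid and the buffer holds `n` floats:

```c
/* Assumed in scope: cl_command_queue queue, cl_mem buf of n floats. */
const size_t n = 1024;
cl_int err;
float *p = (float *)clEnqueueMapBuffer(queue, buf, CL_TRUE, CL_MAP_WRITE,
                                       0, n * sizeof(float),
                                       0, NULL, NULL, &err);
if (err == CL_SUCCESS) {
    for (size_t i = 0; i < n; ++i)
        p[i] = 0.0f;                           /* host-side fill */
    clEnqueueUnmapMemObject(queue, buf, p, 0, NULL, NULL);
    clFinish(queue);                           /* wait for the unmap */
}
```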
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueMigrateMemObjects(cl_command_queue /* command_queue */,
+ cl_uint /* num_mem_objects */,
+ const cl_mem * /* mem_objects */,
+ cl_mem_migration_flags /* flags */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueNDRangeKernel(cl_command_queue /* command_queue */,
+ cl_kernel /* kernel */,
+ cl_uint /* work_dim */,
+ const size_t * /* global_work_offset */,
+ const size_t * /* global_work_size */,
+ const size_t * /* local_work_size */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
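A minimal launch sketch: a 1D global range with the work-group size left to the runtime (local_work_size = NULL), assuming `queue` and `kernel` are valid and the kernel's arguments are already set:

```c
/* Assumed in scope: cl_command_queue queue, cl_kernel kernel. */
size_t global = 1024;
cl_event done;
cl_int err = clEnqueueNDRangeKernel(queue, kernel,
                                    1,        /* work_dim */
                                    NULL,     /* global_work_offset */
                                    &global,  /* global_work_size */
                                    NULL,     /* local_work_size: runtime picks */
                                    0, NULL, &done);
if (err == CL_SUCCESS) {
    clWaitForEvents(1, &done);    /* block until the kernel finishes */
    clReleaseEvent(done);
}
```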
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueTask(cl_command_queue /* command_queue */,
+ cl_kernel /* kernel */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueNativeKernel(cl_command_queue /* command_queue */,
+ void (CL_CALLBACK * /*user_func*/)(void *),
+ void * /* args */,
+ size_t /* cb_args */,
+ cl_uint /* num_mem_objects */,
+ const cl_mem * /* mem_list */,
+ const void ** /* args_mem_loc */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueMarkerWithWaitList(cl_command_queue /* command_queue */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueBarrierWithWaitList(cl_command_queue /* command_queue */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
+
+
+/* Extension function access
+ *
+ * Returns the extension function address for the given function name,
+ * or NULL if a valid function cannot be found. The client must
+ * check that the address is not NULL before using or calling the
+ * returned function address.
+ */
+extern CL_API_ENTRY void * CL_API_CALL
+clGetExtensionFunctionAddressForPlatform(cl_platform_id /* platform */,
+ const char * /* func_name */) CL_API_SUFFIX__VERSION_1_2;
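The returned pointer must be cast to the extension's prototype before use. A sketch using clIcdGetPlatformIDsKHR (from cl_khr_icd) purely as an example name; any function listed in the platform's extension string works the same way:

```c
/* Assumed in scope: a valid cl_platform_id `platform`. The typedef
 * mirrors the extension's documented prototype. */
typedef cl_int (CL_API_CALL *pfn_query_t)(cl_uint, cl_platform_id *, cl_uint *);

pfn_query_t fn = (pfn_query_t)clGetExtensionFunctionAddressForPlatform(
    platform, "clIcdGetPlatformIDsKHR");
if (fn != NULL) {
    cl_uint n = 0;
    fn(0, NULL, &n);   /* only call through the pointer after the NULL check */
}
```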
+
+
+/* Deprecated OpenCL 1.1 APIs*/
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
+clCreateImage2D(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ const cl_image_format * /* image_format */,
+ size_t /* image_width */,
+ size_t /* image_height */,
+ size_t /* image_row_pitch */,
+ void * /* host_ptr */,
+ cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
+clCreateImage3D(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ const cl_image_format * /* image_format */,
+ size_t /* image_width */,
+ size_t /* image_height */,
+ size_t /* image_depth */,
+ size_t /* image_row_pitch */,
+ size_t /* image_slice_pitch */,
+ void * /* host_ptr */,
+ cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
+clEnqueueMarker(cl_command_queue /* command_queue */,
+ cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
+clEnqueueWaitForEvents(cl_command_queue /* command_queue */,
+ cl_uint /* num_events */,
+ const cl_event * /* event_list */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
+clEnqueueBarrier(cl_command_queue /* command_queue */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
+clUnloadCompiler(void) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED void * CL_API_CALL
+clGetExtensionFunctionAddress(const char * /* func_name */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_CL_H */
+
diff --git a/include/CL/cl.hpp b/include/CL/cl.hpp
new file mode 100644
index 0000000000..38fac1962a
--- /dev/null
+++ b/include/CL/cl.hpp
@@ -0,0 +1,12452 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2013 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/*! \file
+ *
+ * \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33) and
+ * OpenCL 1.2 (rev 15)
+ * \author Benedict R. Gaster, Laurent Morichetti and Lee Howes
+ *
+ * Additions and fixes from:
+ * Brian Cole, March 3rd 2010 and April 2012
+ * Matt Gruenke, April 2012.
+ * Bruce Merry, February 2013.
+ * Tom Deakin and Simon McIntosh-Smith, July 2013
+ *
+ * \version 1.2.6
+ * \date August 2013
+ *
+ * Optional extension support
+ *
+ * cl
+ * cl_ext_device_fission
+ * #define USE_CL_DEVICE_FISSION
+ */
+
+/*! \mainpage
+ * \section intro Introduction
+ * For many large applications C++ is the language of choice and so it seems
+ * reasonable to define C++ bindings for OpenCL.
+ *
+ *
+ * The interface is contained within a single C++ header file \em cl.hpp and all
+ * definitions are contained within the namespace \em cl. There is no additional
+ * requirement to include \em cl.h; to use either the C++ or original C
+ * bindings it is enough to simply include \em cl.hpp.
+ *
+ * The bindings themselves are lightweight and correspond closely to the
+ * underlying C API. Using the C++ bindings introduces no additional execution
+ * overhead.
+ *
+ * For detailed documentation on the bindings see:
+ *
+ * The OpenCL C++ Wrapper API 1.2 (revision 09)
+ * http://www.khronos.org/registry/cl/specs/opencl-cplusplus-1.2.pdf
+ *
+ * \section example Example
+ *
+ * The following example shows a general use case for the C++
+ * bindings, including support for the optional exception feature and
+ * the supplied vector and string classes; see the following sections
+ * for descriptions of these features.
+ *
+ * \code
+ * #define __CL_ENABLE_EXCEPTIONS
+ *
+ * #if defined(__APPLE__) || defined(__MACOSX)
+ * #include <OpenCL/cl.hpp>
+ * #else
+ * #include <CL/cl.hpp>
+ * #endif
+ * #include <cstdio>
+ * #include <cstdlib>
+ * #include <iostream>
+ *
+ * const char * helloStr = "__kernel void "
+ * "hello(void) "
+ * "{ "
+ * " "
+ * "} ";
+ *
+ * int
+ * main(void)
+ * {
+ * cl_int err = CL_SUCCESS;
+ * try {
+ *
+ * std::vector<cl::Platform> platforms;
+ * cl::Platform::get(&platforms);
+ * if (platforms.size() == 0) {
+ * std::cout << "Platform size 0\n";
+ * return -1;
+ * }
+ *
+ * cl_context_properties properties[] =
+ * { CL_CONTEXT_PLATFORM, (cl_context_properties)(platforms[0])(), 0};
+ * cl::Context context(CL_DEVICE_TYPE_CPU, properties);
+ *
+ * std::vector<cl::Device> devices = context.getInfo<CL_CONTEXT_DEVICES>();
+ *
+ * cl::Program::Sources source(1,
+ * std::make_pair(helloStr,strlen(helloStr)));
+ * cl::Program program_ = cl::Program(context, source);
+ * program_.build(devices);
+ *
+ * cl::Kernel kernel(program_, "hello", &err);
+ *
+ * cl::Event event;
+ * cl::CommandQueue queue(context, devices[0], 0, &err);
+ * queue.enqueueNDRangeKernel(
+ * kernel,
+ * cl::NullRange,
+ * cl::NDRange(4,4),
+ * cl::NullRange,
+ * NULL,
+ * &event);
+ *
+ * event.wait();
+ * }
+ * catch (cl::Error err) {
+ * std::cerr
+ * << "ERROR: "
+ * << err.what()
+ * << "("
+ * << err.err()
+ * << ")"
+ * << std::endl;
+ * }
+ *
+ * return EXIT_SUCCESS;
+ * }
+ *
+ * \endcode
+ *
+ */
+#ifndef CL_HPP_
+#define CL_HPP_
+
+#ifdef _WIN32
+
+#include <windows.h>
+#include <malloc.h>
+#include <iterator>
+#include <intrin.h>
+
+#if defined(__CL_ENABLE_EXCEPTIONS)
+#include <exception>
+#endif // #if defined(__CL_ENABLE_EXCEPTIONS)
+
+#pragma push_macro("max")
+#undef max
+#if defined(USE_DX_INTEROP)
+#include <CL/cl_d3d10.h>
+#include <CL/cl_dx9_media_sharing.h>
+#endif
+#endif // _WIN32
+
+//
+#if defined(USE_CL_DEVICE_FISSION)
+#include <CL/cl_ext.h>
+#endif
+
+#if defined(__APPLE__) || defined(__MACOSX)
+#include <OpenGL/OpenGL.h>
+#include <OpenCL/opencl.h>
+#include <libkern/OSAtomic.h>
+#else
+#include <GL/gl.h>
+#include <CL/opencl.h>
+#endif // !__APPLE__
+
+// To avoid accidentally taking ownership of core OpenCL types
+// such as cl_kernel, constructors are made explicit
+// under OpenCL 1.2
+#if defined(CL_VERSION_1_2) && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define __CL_EXPLICIT_CONSTRUCTORS explicit
+#else // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define __CL_EXPLICIT_CONSTRUCTORS
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+// Define deprecated prefixes and suffixes to ensure compilation
+// in case they are not pre-defined
+#if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+
+#if !defined(CL_CALLBACK)
+#define CL_CALLBACK
+#endif //CL_CALLBACK
+
+#include <utility>
+#include <limits>
+
+#if !defined(__NO_STD_VECTOR)
+#include <vector>
+#endif
+
+#if !defined(__NO_STD_STRING)
+#include <string>
+#endif
+
+#if defined(linux) || defined(__APPLE__) || defined(__MACOSX)
+#include <alloca.h>
+
+#include <emmintrin.h>
+#include <xmmintrin.h>
+#endif // linux
+
+#include <cstring>
+
+
+/*! \namespace cl
+ *
+ * \brief The OpenCL C++ bindings are defined within this namespace.
+ *
+ */
+namespace cl {
+
+class Memory;
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+#define __INIT_CL_EXT_FCN_PTR(name) \
+ if(!pfn_##name) { \
+ pfn_##name = (PFN_##name) \
+ clGetExtensionFunctionAddress(#name); \
+ if(!pfn_##name) { \
+ } \
+ }
+#endif // #if defined(CL_VERSION_1_1)
+
+#if defined(CL_VERSION_1_2)
+#define __INIT_CL_EXT_FCN_PTR_PLATFORM(platform, name) \
+ if(!pfn_##name) { \
+ pfn_##name = (PFN_##name) \
+ clGetExtensionFunctionAddressForPlatform(platform, #name); \
+ if(!pfn_##name) { \
+ } \
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+class Program;
+class Device;
+class Context;
+class CommandQueue;
+class Memory;
+class Buffer;
+
+#if defined(__CL_ENABLE_EXCEPTIONS)
+/*! \brief Exception class
+ *
+ * This may be thrown by API functions when __CL_ENABLE_EXCEPTIONS is defined.
+ */
+class Error : public std::exception
+{
+private:
+ cl_int err_;
+ const char * errStr_;
+public:
+ /*! \brief Create a new CL error exception for a given error code
+ * and corresponding message.
+ *
+ * \param err error code value.
+ *
+ * \param errStr a descriptive string that must remain in scope until
+ * handling of the exception has concluded. If set, it
+ * will be returned by what().
+ */
+ Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
+ {}
+
+ ~Error() throw() {}
+
+ /*! \brief Get error string associated with exception
+ *
+ * \return A memory pointer to the error message string.
+ */
+ virtual const char * what() const throw ()
+ {
+ if (errStr_ == NULL) {
+ return "empty";
+ }
+ else {
+ return errStr_;
+ }
+ }
+
+ /*! \brief Get error code associated with exception
+ *
+ * \return The error code.
+ */
+ cl_int err(void) const { return err_; }
+};
+
+#define __ERR_STR(x) #x
+#else
+#define __ERR_STR(x) NULL
+#endif // __CL_ENABLE_EXCEPTIONS
+
+
+namespace detail
+{
+#if defined(__CL_ENABLE_EXCEPTIONS)
+static inline cl_int errHandler (
+ cl_int err,
+ const char * errStr = NULL)
+{
+ if (err != CL_SUCCESS) {
+ throw Error(err, errStr);
+ }
+ return err;
+}
+#else
+static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
+{
+ (void) errStr; // suppress unused variable warning
+ return err;
+}
+#endif // __CL_ENABLE_EXCEPTIONS
+}
+
+
+
+//! \cond DOXYGEN_DETAIL
+#if !defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
+#define __GET_DEVICE_INFO_ERR __ERR_STR(clGetDeviceInfo)
+#define __GET_PLATFORM_INFO_ERR __ERR_STR(clGetPlatformInfo)
+#define __GET_DEVICE_IDS_ERR __ERR_STR(clGetDeviceIDs)
+#define __GET_PLATFORM_IDS_ERR __ERR_STR(clGetPlatformIDs)
+#define __GET_CONTEXT_INFO_ERR __ERR_STR(clGetContextInfo)
+#define __GET_EVENT_INFO_ERR __ERR_STR(clGetEventInfo)
+#define __GET_EVENT_PROFILE_INFO_ERR __ERR_STR(clGetEventProfilingInfo)
+#define __GET_MEM_OBJECT_INFO_ERR __ERR_STR(clGetMemObjectInfo)
+#define __GET_IMAGE_INFO_ERR __ERR_STR(clGetImageInfo)
+#define __GET_SAMPLER_INFO_ERR __ERR_STR(clGetSamplerInfo)
+#define __GET_KERNEL_INFO_ERR __ERR_STR(clGetKernelInfo)
+#if defined(CL_VERSION_1_2)
+#define __GET_KERNEL_ARG_INFO_ERR __ERR_STR(clGetKernelArgInfo)
+#endif // #if defined(CL_VERSION_1_2)
+#define __GET_KERNEL_WORK_GROUP_INFO_ERR __ERR_STR(clGetKernelWorkGroupInfo)
+#define __GET_PROGRAM_INFO_ERR __ERR_STR(clGetProgramInfo)
+#define __GET_PROGRAM_BUILD_INFO_ERR __ERR_STR(clGetProgramBuildInfo)
+#define __GET_COMMAND_QUEUE_INFO_ERR __ERR_STR(clGetCommandQueueInfo)
+
+#define __CREATE_CONTEXT_ERR __ERR_STR(clCreateContext)
+#define __CREATE_CONTEXT_FROM_TYPE_ERR __ERR_STR(clCreateContextFromType)
+#define __GET_SUPPORTED_IMAGE_FORMATS_ERR __ERR_STR(clGetSupportedImageFormats)
+
+#define __CREATE_BUFFER_ERR __ERR_STR(clCreateBuffer)
+#define __COPY_ERR __ERR_STR(cl::copy)
+#define __CREATE_SUBBUFFER_ERR __ERR_STR(clCreateSubBuffer)
+#define __CREATE_GL_BUFFER_ERR __ERR_STR(clCreateFromGLBuffer)
+#define __CREATE_GL_RENDER_BUFFER_ERR __ERR_STR(clCreateFromGLRenderbuffer)
+#define __GET_GL_OBJECT_INFO_ERR __ERR_STR(clGetGLObjectInfo)
+#if defined(CL_VERSION_1_2)
+#define __CREATE_IMAGE_ERR __ERR_STR(clCreateImage)
+#define __CREATE_GL_TEXTURE_ERR __ERR_STR(clCreateFromGLTexture)
+#define __IMAGE_DIMENSION_ERR __ERR_STR(Incorrect image dimensions)
+#endif // #if defined(CL_VERSION_1_2)
+#define __CREATE_SAMPLER_ERR __ERR_STR(clCreateSampler)
+#define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR __ERR_STR(clSetMemObjectDestructorCallback)
+
+#define __CREATE_USER_EVENT_ERR __ERR_STR(clCreateUserEvent)
+#define __SET_USER_EVENT_STATUS_ERR __ERR_STR(clSetUserEventStatus)
+#define __SET_EVENT_CALLBACK_ERR __ERR_STR(clSetEventCallback)
+#define __WAIT_FOR_EVENTS_ERR __ERR_STR(clWaitForEvents)
+
+#define __CREATE_KERNEL_ERR __ERR_STR(clCreateKernel)
+#define __SET_KERNEL_ARGS_ERR __ERR_STR(clSetKernelArg)
+#define __CREATE_PROGRAM_WITH_SOURCE_ERR __ERR_STR(clCreateProgramWithSource)
+#define __CREATE_PROGRAM_WITH_BINARY_ERR __ERR_STR(clCreateProgramWithBinary)
+#if defined(CL_VERSION_1_2)
+#define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR __ERR_STR(clCreateProgramWithBuiltInKernels)
+#endif // #if defined(CL_VERSION_1_2)
+#define __BUILD_PROGRAM_ERR __ERR_STR(clBuildProgram)
+#if defined(CL_VERSION_1_2)
+#define __COMPILE_PROGRAM_ERR __ERR_STR(clCompileProgram)
+
+#endif // #if defined(CL_VERSION_1_2)
+#define __CREATE_KERNELS_IN_PROGRAM_ERR __ERR_STR(clCreateKernelsInProgram)
+
+#define __CREATE_COMMAND_QUEUE_ERR __ERR_STR(clCreateCommandQueue)
+#define __SET_COMMAND_QUEUE_PROPERTY_ERR __ERR_STR(clSetCommandQueueProperty)
+#define __ENQUEUE_READ_BUFFER_ERR __ERR_STR(clEnqueueReadBuffer)
+#define __ENQUEUE_READ_BUFFER_RECT_ERR __ERR_STR(clEnqueueReadBufferRect)
+#define __ENQUEUE_WRITE_BUFFER_ERR __ERR_STR(clEnqueueWriteBuffer)
+#define __ENQUEUE_WRITE_BUFFER_RECT_ERR __ERR_STR(clEnqueueWriteBufferRect)
+#define __ENQEUE_COPY_BUFFER_ERR __ERR_STR(clEnqueueCopyBuffer)
+#define __ENQEUE_COPY_BUFFER_RECT_ERR __ERR_STR(clEnqueueCopyBufferRect)
+#define __ENQUEUE_FILL_BUFFER_ERR __ERR_STR(clEnqueueFillBuffer)
+#define __ENQUEUE_READ_IMAGE_ERR __ERR_STR(clEnqueueReadImage)
+#define __ENQUEUE_WRITE_IMAGE_ERR __ERR_STR(clEnqueueWriteImage)
+#define __ENQUEUE_COPY_IMAGE_ERR __ERR_STR(clEnqueueCopyImage)
+#define __ENQUEUE_FILL_IMAGE_ERR __ERR_STR(clEnqueueFillImage)
+#define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR __ERR_STR(clEnqueueCopyImageToBuffer)
+#define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR __ERR_STR(clEnqueueCopyBufferToImage)
+#define __ENQUEUE_MAP_BUFFER_ERR __ERR_STR(clEnqueueMapBuffer)
+#define __ENQUEUE_MAP_IMAGE_ERR __ERR_STR(clEnqueueMapImage)
+#define __ENQUEUE_UNMAP_MEM_OBJECT_ERR __ERR_STR(clEnqueueUnmapMemObject)
+#define __ENQUEUE_NDRANGE_KERNEL_ERR __ERR_STR(clEnqueueNDRangeKernel)
+#define __ENQUEUE_TASK_ERR __ERR_STR(clEnqueueTask)
+#define __ENQUEUE_NATIVE_KERNEL __ERR_STR(clEnqueueNativeKernel)
+#if defined(CL_VERSION_1_2)
+#define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR __ERR_STR(clEnqueueMigrateMemObjects)
+#endif // #if defined(CL_VERSION_1_2)
+
+#define __ENQUEUE_ACQUIRE_GL_ERR __ERR_STR(clEnqueueAcquireGLObjects)
+#define __ENQUEUE_RELEASE_GL_ERR __ERR_STR(clEnqueueReleaseGLObjects)
+
+
+#define __RETAIN_ERR __ERR_STR(Retain Object)
+#define __RELEASE_ERR __ERR_STR(Release Object)
+#define __FLUSH_ERR __ERR_STR(clFlush)
+#define __FINISH_ERR __ERR_STR(clFinish)
+#define __VECTOR_CAPACITY_ERR __ERR_STR(Vector capacity error)
+
+/**
+ * Error string for createSubDevices: uses the core clCreateSubDevices
+ * entry point on CL 1.2, and the clCreateSubDevicesEXT device-fission
+ * extension otherwise.
+ */
+#if defined(CL_VERSION_1_2)
+#define __CREATE_SUB_DEVICES __ERR_STR(clCreateSubDevices)
+#else
+#define __CREATE_SUB_DEVICES __ERR_STR(clCreateSubDevicesEXT)
+#endif // #if defined(CL_VERSION_1_2)
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+#define __ENQUEUE_MARKER_ERR __ERR_STR(clEnqueueMarker)
+#define __ENQUEUE_WAIT_FOR_EVENTS_ERR __ERR_STR(clEnqueueWaitForEvents)
+#define __ENQUEUE_BARRIER_ERR __ERR_STR(clEnqueueBarrier)
+#define __UNLOAD_COMPILER_ERR __ERR_STR(clUnloadCompiler)
+#define __CREATE_GL_TEXTURE_2D_ERR __ERR_STR(clCreateFromGLTexture2D)
+#define __CREATE_GL_TEXTURE_3D_ERR __ERR_STR(clCreateFromGLTexture3D)
+#define __CREATE_IMAGE2D_ERR __ERR_STR(clCreateImage2D)
+#define __CREATE_IMAGE3D_ERR __ERR_STR(clCreateImage3D)
+#endif // deprecated 1.1 APIs
+
+#endif // __CL_USER_OVERRIDE_ERROR_STRINGS
+//! \endcond
+
+/**
+ * CL 1.2 marker and barrier commands
+ */
+#if defined(CL_VERSION_1_2)
+#define __ENQUEUE_MARKER_WAIT_LIST_ERR __ERR_STR(clEnqueueMarkerWithWaitList)
+#define __ENQUEUE_BARRIER_WAIT_LIST_ERR __ERR_STR(clEnqueueBarrierWithWaitList)
+#endif // #if defined(CL_VERSION_1_2)
+
+#if !defined(__USE_DEV_STRING) && !defined(__NO_STD_STRING)
+typedef std::string STRING_CLASS;
+#elif !defined(__USE_DEV_STRING)
+
+/*! \class string
+ * \brief Simple string class that provides a limited subset of std::string
+ * functionality but avoids many of the issues that come with that class.
+ *
+ * \note Deprecated. Please use std::string as default or
+ * re-define the string class to match the std::string
+ * interface by defining STRING_CLASS
+ */
+class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED string CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+{
+private:
+ ::size_t size_;
+ char * str_;
+public:
+ //! \brief Constructs an empty string, allocating no memory.
+ string(void) : size_(0), str_(NULL)
+ {
+ }
+
+ /*! \brief Constructs a string populated from an arbitrary value of
+ * specified size.
+ *
+ * An extra '\0' is added, in case none was contained in str.
+ *
+ * \param str the initial value of the string instance. Note that '\0'
+ * characters receive no special treatment. If NULL,
+ * the string is left empty, with a size of 0.
+ *
+ * \param size the number of characters to copy from str.
+ */
+ string(const char * str, ::size_t size) :
+ size_(size),
+ str_(NULL)
+ {
+ if( size > 0 ) {
+ str_ = new char[size_+1];
+ if (str_ != NULL) {
+ memcpy(str_, str, size_ * sizeof(char));
+ str_[size_] = '\0';
+ }
+ else {
+ size_ = 0;
+ }
+ }
+ }
+
+ /*! \brief Constructs a string populated from a null-terminated value.
+ *
+ * \param str the null-terminated initial value of the string instance.
+ * If NULL, the string is left empty, with a size of 0.
+ */
+ string(const char * str) :
+ size_(0),
+ str_(NULL)
+ {
+ if( str ) {
+ size_= ::strlen(str);
+ }
+ if( size_ > 0 ) {
+ str_ = new char[size_ + 1];
+ if (str_ != NULL) {
+ memcpy(str_, str, (size_ + 1) * sizeof(char));
+ }
+ }
+ }
+
+ void resize( ::size_t n )
+ {
+ if( size_ == n ) {
+ return;
+ }
+ if (n == 0) {
+ if( str_ ) {
+ delete [] str_;
+ }
+ str_ = NULL;
+ size_ = 0;
+ }
+ else {
+ char *newString = new char[n + 1];
+ ::size_t copySize = n;
+ if( size_ < n ) {
+ copySize = size_;
+ }
+ size_ = n;
+
+ if(str_) {
+ memcpy(newString, str_, (copySize + 1) * sizeof(char));
+ }
+ if( copySize < size_ ) {
+ memset(newString + copySize, 0, size_ - copySize);
+ }
+ newString[size_] = '\0';
+
+ delete [] str_;
+ str_ = newString;
+ }
+ }
+
+ const char& operator[] ( ::size_t pos ) const
+ {
+ return str_[pos];
+ }
+
+ char& operator[] ( ::size_t pos )
+ {
+ return str_[pos];
+ }
+
+ /*! \brief Copies the value of another string to this one.
+ *
+ * \param rhs the string to copy.
+ *
+ * \returns a reference to the modified instance.
+ */
+ string& operator=(const string& rhs)
+ {
+ if (this == &rhs) {
+ return *this;
+ }
+
+ if( str_ != NULL ) {
+ delete [] str_;
+ str_ = NULL;
+ size_ = 0;
+ }
+
+ if (rhs.size_ == 0 || rhs.str_ == NULL) {
+ str_ = NULL;
+ size_ = 0;
+ }
+ else {
+ str_ = new char[rhs.size_ + 1];
+ size_ = rhs.size_;
+
+ if (str_ != NULL) {
+ memcpy(str_, rhs.str_, (size_ + 1) * sizeof(char));
+ }
+ else {
+ size_ = 0;
+ }
+ }
+
+ return *this;
+ }
+
+ /*! \brief Constructs a string by copying the value of another instance.
+ *
+ * \param rhs the string to copy.
+ */
+ string(const string& rhs) :
+ size_(0),
+ str_(NULL)
+ {
+ *this = rhs;
+ }
+
+ //! \brief Destructor - frees memory used to hold the current value.
+ ~string()
+ {
+ delete[] str_;
+ str_ = NULL;
+ }
+
+ //! \brief Queries the length of the string, excluding any added '\0's.
+ ::size_t size(void) const { return size_; }
+
+ //! \brief Queries the length of the string, excluding any added '\0's.
+ ::size_t length(void) const { return size(); }
+
+ /*! \brief Returns a pointer to the private copy held by this instance,
+ * or "" if empty/unset.
+ */
+ const char * c_str(void) const { return (str_) ? str_ : "";}
+};
+typedef cl::string STRING_CLASS;
+#endif // #elif !defined(__USE_DEV_STRING)
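+
+/* A configuration sketch: defining __NO_STD_STRING before including this
+ * header selects the cl::string implementation above, while by default
+ * STRING_CLASS resolves to std::string (illustrative only):
+ *
+ * \code
+ * #define __NO_STD_STRING
+ * #include <CL/cl.hpp>
+ * cl::string s("OpenCL"); // STRING_CLASS is cl::string here
+ * \endcode
+ */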
+
+#if !defined(__USE_DEV_VECTOR) && !defined(__NO_STD_VECTOR)
+#define VECTOR_CLASS std::vector
+#elif !defined(__USE_DEV_VECTOR)
+#define VECTOR_CLASS cl::vector
+
+#if !defined(__MAX_DEFAULT_VECTOR_SIZE)
+#define __MAX_DEFAULT_VECTOR_SIZE 10
+#endif
+
+/*! \class vector
+ * \brief Fixed-sized vector implementation mirroring a subset of
+ * std::vector functionality.
+ *
+ * \note Deprecated. Please use std::vector as default or
+ * re-define the vector class to match the std::vector
+ * interface by defining VECTOR_CLASS
+ *
+ * \note Not recommended for use with custom objects, as the
+ * current implementation default-constructs all N elements.
+ *
+ * \note
+ * This differs from std::vector<> not just in memory allocation,
+ * but also in terms of when members are constructed, destroyed,
+ * and assigned instead of being copy constructed.
+ *
+ * \param T type of element contained in the vector.
+ *
+ * \param N maximum size of the vector.
+ */
+template <typename T, unsigned int N = __MAX_DEFAULT_VECTOR_SIZE>
+class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED vector CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+{
+private:
+ T data_[N];
+ unsigned int size_;
+
+public:
+ //! \brief Constructs an empty vector; storage for N elements is part of the object itself.
+ vector() :
+ size_(static_cast<unsigned int>(0))
+ {}
+
+ //! \brief Destroys all contained elements; the fixed storage is released with the object itself.
+ ~vector()
+ {
+ clear();
+ }
+
+ //! \brief Returns the number of elements currently contained.
+ unsigned int size(void) const
+ {
+ return size_;
+ }
+
+ /*! \brief Empties the vector of all elements.
+ * \note
+ * This does not deallocate memory but will invoke destructors
+ * on contained elements.
+ */
+ void clear()
+ {
+ while(!empty()) {
+ pop_back();
+ }
+ }
+
+ /*! \brief Appends an element after the last valid element.
+ * Calling this on a vector that has reached capacity will throw an
+ * exception if exceptions are enabled.
+ */
+ void push_back (const T& x)
+ {
+ if (size() < N) {
+ new (&data_[size_]) T(x);
+ size_++;
+ } else {
+ detail::errHandler(CL_MEM_OBJECT_ALLOCATION_FAILURE, __VECTOR_CAPACITY_ERR);
+ }
+ }
+
+ /*! \brief Removes the last valid element from the vector.
+ * Calling this on an empty vector will throw an exception
+ * if exceptions are enabled.
+ */
+ void pop_back(void)
+ {
+ if (size_ != 0) {
+ --size_;
+ data_[size_].~T();
+ } else {
+ detail::errHandler(CL_MEM_OBJECT_ALLOCATION_FAILURE, __VECTOR_CAPACITY_ERR);
+ }
+ }
+
+ /*! \brief Constructs with a value copied from another.
+ *
+ * \param vec the vector to copy.
+ */
+ vector(const vector<T, N>& vec) :
+ size_(0)
+ {
+ // Start empty: assign() clears first, and must not invoke destructors
+ // on elements that were never constructed.
+ if (vec.size_ != 0) {
+ assign(vec.begin(), vec.end());
+ }
+ }
+
+ /*! \brief Constructs with a specified number of initial elements.
+ *
+ * \param size number of initial elements.
+ *
+ * \param val value of initial elements.
+ */
+ vector(unsigned int size, const T& val = T()) :
+ size_(0)
+ {
+ for (unsigned int i = 0; i < size; i++) {
+ push_back(val);
+ }
+ }
+
+ /*! \brief Overwrites the current content with that copied from another
+ * instance.
+ *
+ * \param rhs vector to copy.
+ *
+ * \returns a reference to this.
+ */
+ vector<T, N>& operator=(const vector<T, N>& rhs)
+ {
+ if (this == &rhs) {
+ return *this;
+ }
+
+ if (rhs.size_ != 0) {
+ assign(rhs.begin(), rhs.end());
+ } else {
+ clear();
+ }
+
+ return *this;
+ }
+
+ /*! \brief Tests equality against another instance.
+ *
+ * \param vec the vector against which to compare.
+ */
+ bool operator==(const vector<T,N> &vec) const
+ {
+ if (size() != vec.size()) {
+ return false;
+ }
+
+ for( unsigned int i = 0; i < size(); ++i ) {
+ if( operator[](i) != vec[i] ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ //! \brief Conversion operator to T*.
+ operator T* () { return data_; }
+
+ //! \brief Conversion operator to const T*.
+ operator const T* () const { return data_; }
+
+ //! \brief Tests whether this instance has any elements.
+ bool empty (void) const
+ {
+ return size_==0;
+ }
+
+ //! \brief Returns the maximum number of elements this instance can hold.
+ unsigned int max_size (void) const
+ {
+ return N;
+ }
+
+ //! \brief Returns the maximum number of elements this instance can hold.
+ unsigned int capacity () const
+ {
+ return N;
+ }
+
+ /*! \brief Returns a reference to a given element.
+ *
+ * \param index which element to access.
+ *
+ * \note
+ * The caller is responsible for ensuring index is >= 0 and < size().
+ */
+ T& operator[](int index)
+ {
+ return data_[index];
+ }
+
+ /*! \brief Returns a const reference to a given element.
+ *
+ * \param index which element to access.
+ *
+ * \note
+ * The caller is responsible for ensuring index is >= 0 and < size().
+ */
+ const T& operator[](int index) const
+ {
+ return data_[index];
+ }
+
+ /*! \brief Assigns elements of the vector based on a source iterator range.
+ *
+ * \param start Beginning iterator of source range
+ * \param end End iterator of source range
+ *
+ * \note
+ * Will throw an exception if exceptions are enabled and size exceeded.
+ */
+ template<class I>
+ void assign(I start, I end)
+ {
+ clear();
+ while(start != end) {
+ push_back(*start);
+ start++;
+ }
+ }
+
+ /*! \class iterator
+ * \brief Const iterator class for vectors
+ */
+ class iterator
+ {
+ private:
+ const vector<T,N> *vec_;
+ int index_;
+
+ /**
+ * Internal iterator constructor to capture reference
+ * to the vector it iterates over rather than taking
+ * the vector by copy.
+ */
+ iterator (const vector<T,N> &vec, int index) :
+ vec_(&vec)
+ {
+ if( !vec.empty() ) {
+ index_ = index;
+ } else {
+ index_ = -1;
+ }
+ }
+
+ public:
+ iterator(void) :
+ vec_(NULL),
+ index_(-1)
+ {
+ }
+
+ iterator(const iterator& rhs) :
+ vec_(rhs.vec_),
+ index_(rhs.index_)
+ {
+ }
+
+ ~iterator(void) {}
+
+ static iterator begin(const cl::vector<T,N> &vec)
+ {
+ iterator i(vec, 0);
+
+ return i;
+ }
+
+ static iterator end(const cl::vector<T,N> &vec)
+ {
+ iterator i(vec, vec.size());
+
+ return i;
+ }
+
+ bool operator==(iterator i)
+ {
+ return ((vec_ == i.vec_) &&
+ (index_ == i.index_));
+ }
+
+ bool operator!=(iterator i)
+ {
+ return (!(*this==i));
+ }
+
+ iterator& operator++()
+ {
+ ++index_;
+ return *this;
+ }
+
+ iterator operator++(int)
+ {
+ iterator retVal(*this);
+ ++index_;
+ return retVal;
+ }
+
+ iterator& operator--()
+ {
+ --index_;
+ return *this;
+ }
+
+ iterator operator--(int)
+ {
+ iterator retVal(*this);
+ --index_;
+ return retVal;
+ }
+
+ const T& operator *() const
+ {
+ return (*vec_)[index_];
+ }
+ };
+
+ iterator begin(void)
+ {
+ return iterator::begin(*this);
+ }
+
+ iterator begin(void) const
+ {
+ return iterator::begin(*this);
+ }
+
+ iterator end(void)
+ {
+ return iterator::end(*this);
+ }
+
+ iterator end(void) const
+ {
+ return iterator::end(*this);
+ }
+
+ T& front(void)
+ {
+ return data_[0];
+ }
+
+ T& back(void)
+ {
+ return data_[size_-1];
+ }
+
+ const T& front(void) const
+ {
+ return data_[0];
+ }
+
+ const T& back(void) const
+ {
+ return data_[size_-1];
+ }
+};
+#endif // #if !defined(__USE_DEV_VECTOR) && !defined(__NO_STD_VECTOR)
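+
+/* A usage sketch for the fixed-capacity vector above, assuming
+ * __NO_STD_VECTOR has been defined so that VECTOR_CLASS is cl::vector
+ * (illustrative only):
+ *
+ * \code
+ * cl::vector<int, 4> v; // capacity fixed at four elements
+ * v.push_back(1);
+ * v.push_back(2);
+ * for (cl::vector<int, 4>::iterator it = v.begin(); it != v.end(); ++it) {
+ * // *it visits 1, then 2
+ * }
+ * \endcode
+ */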
+
+
+
+
+
+namespace detail {
+#define __DEFAULT_NOT_INITIALIZED 1
+#define __DEFAULT_BEING_INITIALIZED 2
+#define __DEFAULT_INITIALIZED 4
+
+ /*
+ * Compare and exchange primitives are needed for handling of defaults
+ */
+ inline int compare_exchange(volatile int * dest, int exchange, int comparand)
+ {
+#ifdef _WIN32
+ return (int)(InterlockedCompareExchange(
+ (volatile long*)dest,
+ (long)exchange,
+ (long)comparand));
+#elif defined(__APPLE__) || defined(__MACOSX)
+ // Note: OSAtomicOr32Orig ORs exchange into *dest and returns the original
+ // value. That is sufficient here because the default-initialization states
+ // are distinct bit flags tested with '&' rather than compared for equality.
+ return OSAtomicOr32Orig((uint32_t)exchange, (volatile uint32_t*)dest);
+#else // !_WIN32 || defined(__APPLE__) || defined(__MACOSX)
+ return (__sync_val_compare_and_swap(
+ dest,
+ comparand,
+ exchange));
+#endif // !_WIN32
+ }
+
+ inline void fence() { _mm_mfence(); }
+} // namespace detail
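+
+/* The primitives above implement a simple initialize-once protocol for the
+ * library's cached defaults (see Context::getDefault() below). The pattern,
+ * roughly:
+ *
+ * \code
+ * int state = detail::compare_exchange(
+ * &initialized, __DEFAULT_BEING_INITIALIZED, __DEFAULT_NOT_INITIALIZED);
+ * if (state & __DEFAULT_INITIALIZED) { // use the cached default
+ * } else if (state & __DEFAULT_BEING_INITIALIZED) { // spin, calling fence()
+ * } else { // initialize, then publish __DEFAULT_INITIALIZED
+ * }
+ * \endcode
+ */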
+
+
+/*! \brief Class used to interface between C++ and
+ * OpenCL C calls that require arrays of size_t values, whose
+ * size is known statically.
+ */
+template <int N>
+class size_t
+{
+private:
+ ::size_t data_[N];
+
+public:
+ //! \brief Initialize size_t to all 0s
+ size_t()
+ {
+ for( int i = 0; i < N; ++i ) {
+ data_[i] = 0;
+ }
+ }
+
+ ::size_t& operator[](int index)
+ {
+ return data_[index];
+ }
+
+ const ::size_t& operator[](int index) const
+ {
+ return data_[index];
+ }
+
+ //! \brief Conversion operator to T*.
+ operator ::size_t* () { return data_; }
+
+ //! \brief Conversion operator to const T*.
+ operator const ::size_t* () const { return data_; }
+};
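+
+/* A usage sketch: cl::size_t<N> is zero-initialized and converts implicitly
+ * to ::size_t*, so it can be passed directly to C entry points expecting a
+ * size_t array (illustrative only):
+ *
+ * \code
+ * cl::size_t<3> origin; // {0, 0, 0}
+ * cl::size_t<3> region;
+ * region[0] = 64; region[1] = 64; region[2] = 1;
+ * \endcode
+ */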
+
+namespace detail {
+
+// Generic getInfoHelper. The final parameter is used to guide overload
+// resolution: the actual parameter passed is an int, which makes this
+// a worse conversion sequence than a specialization that declares the
+// parameter as an int.
+template<typename Functor, typename T>
+inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
+{
+ return f(name, sizeof(T), param, NULL);
+}
+
+// Specialized getInfoHelper for VECTOR_CLASS params
+template <typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, VECTOR_CLASS<T>* param, long)
+{
+ ::size_t required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ T* value = (T*) alloca(required);
+ err = f(name, required, value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ param->assign(&value[0], &value[required/sizeof(T)]);
+ return CL_SUCCESS;
+}
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
+template <typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, VECTOR_CLASS<T>* param, int, typename T::cl_type = 0)
+{
+ ::size_t required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ typename T::cl_type * value = (typename T::cl_type *) alloca(required);
+ err = f(name, required, value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ ::size_t elements = required / sizeof(typename T::cl_type);
+ param->assign(&value[0], &value[elements]);
+ for (::size_t i = 0; i < elements; i++)
+ {
+ if (value[i] != NULL)
+ {
+ err = (*param)[i].retain();
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ }
+ }
+ return CL_SUCCESS;
+}
+
+// Specialized for getInfo<CL_PROGRAM_BINARIES>
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, VECTOR_CLASS<char *>* param, int)
+{
+ cl_int err = f(name, param->size() * sizeof(char *), &(*param)[0], NULL);
+
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for STRING_CLASS params
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, STRING_CLASS* param, long)
+{
+ ::size_t required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ char* value = (char*) alloca(required);
+ err = f(name, required, value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ *param = value;
+ return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for cl::size_t params
+template <typename Func, ::size_t N>
+inline cl_int getInfoHelper(Func f, cl_uint name, size_t<N>* param, long)
+{
+ ::size_t required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ ::size_t* value = (::size_t*) alloca(required);
+ err = f(name, required, value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ for( ::size_t i = 0; i < N; ++i) {
+ (*param)[i] = value[i];
+ }
+
+ return CL_SUCCESS;
+}
+
+template<typename T> struct ReferenceHandler;
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
+template<typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
+{
+ typename T::cl_type value;
+ cl_int err = f(name, sizeof(value), &value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ *param = value;
+ if (value != NULL)
+ {
+ err = param->retain();
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ }
+ return CL_SUCCESS;
+}
+
+#define __PARAM_NAME_INFO_1_0(F) \
+ F(cl_platform_info, CL_PLATFORM_PROFILE, STRING_CLASS) \
+ F(cl_platform_info, CL_PLATFORM_VERSION, STRING_CLASS) \
+ F(cl_platform_info, CL_PLATFORM_NAME, STRING_CLASS) \
+ F(cl_platform_info, CL_PLATFORM_VENDOR, STRING_CLASS) \
+ F(cl_platform_info, CL_PLATFORM_EXTENSIONS, STRING_CLASS) \
+ \
+ F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
+ F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, ::size_t) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, VECTOR_CLASS< ::size_t>) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
+ F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, ::size_t) \
+ F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, ::size_t) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, ::size_t) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, ::size_t) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, ::size_t) \
+ F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
+ F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, ::size_t) \
+ F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint)\
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
+ F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
+ F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, ::size_t) \
+ F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
+ F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties) \
+ F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
+ F(cl_device_info, CL_DEVICE_NAME, STRING_CLASS) \
+ F(cl_device_info, CL_DEVICE_VENDOR, STRING_CLASS) \
+ F(cl_device_info, CL_DRIVER_VERSION, STRING_CLASS) \
+ F(cl_device_info, CL_DEVICE_PROFILE, STRING_CLASS) \
+ F(cl_device_info, CL_DEVICE_VERSION, STRING_CLASS) \
+ F(cl_device_info, CL_DEVICE_EXTENSIONS, STRING_CLASS) \
+ \
+ F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
+ F(cl_context_info, CL_CONTEXT_DEVICES, VECTOR_CLASS<Device>) \
+ F(cl_context_info, CL_CONTEXT_PROPERTIES, VECTOR_CLASS<cl_context_properties>) \
+ \
+ F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
+ F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
+ F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
+ F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
+ \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
+ \
+ F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
+ F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
+ F(cl_mem_info, CL_MEM_SIZE, ::size_t) \
+ F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
+ F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
+ F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
+ F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
+ \
+ F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
+ F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, ::size_t) \
+ F(cl_image_info, CL_IMAGE_ROW_PITCH, ::size_t) \
+ F(cl_image_info, CL_IMAGE_SLICE_PITCH, ::size_t) \
+ F(cl_image_info, CL_IMAGE_WIDTH, ::size_t) \
+ F(cl_image_info, CL_IMAGE_HEIGHT, ::size_t) \
+ F(cl_image_info, CL_IMAGE_DEPTH, ::size_t) \
+ \
+ F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
+ F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
+ F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
+ F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
+ F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
+ \
+ F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
+ F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
+ F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
+ F(cl_program_info, CL_PROGRAM_DEVICES, VECTOR_CLASS<Device>) \
+ F(cl_program_info, CL_PROGRAM_SOURCE, STRING_CLASS) \
+ F(cl_program_info, CL_PROGRAM_BINARY_SIZES, VECTOR_CLASS< ::size_t>) \
+ F(cl_program_info, CL_PROGRAM_BINARIES, VECTOR_CLASS<char *>) \
+ \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, STRING_CLASS) \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, STRING_CLASS) \
+ \
+ F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, STRING_CLASS) \
+ F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
+ F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
+ F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
+ F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
+ \
+ F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, ::size_t) \
+ F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::size_t<3>) \
+ F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
+ \
+ F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
+ F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
+ F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
+ F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
+
+#if defined(CL_VERSION_1_1)
+#define __PARAM_NAME_INFO_1_1(F) \
+ F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
+ F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool) \
+ F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, STRING_CLASS) \
+ \
+ F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
+ F(cl_mem_info, CL_MEM_OFFSET, ::size_t) \
+ \
+ F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, ::size_t) \
+ F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
+ \
+ F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
+#endif // CL_VERSION_1_1
+
+
+#if defined(CL_VERSION_1_2)
+#define __PARAM_NAME_INFO_1_2(F) \
+ F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer) \
+ \
+ F(cl_program_info, CL_PROGRAM_NUM_KERNELS, ::size_t) \
+ F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, STRING_CLASS) \
+ \
+ F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
+ \
+ F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, STRING_CLASS) \
+ \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, STRING_CLASS) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, STRING_CLASS) \
+ \
+ F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl_device_id) \
+ F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, VECTOR_CLASS<cl_device_partition_property>) \
+ F(cl_device_info, CL_DEVICE_PARTITION_TYPE, VECTOR_CLASS<cl_device_partition_property>) \
+ F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, cl_bool) \
+ F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
+ F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, STRING_CLASS)
+#endif // #if defined(CL_VERSION_1_2)
+
+#if defined(USE_CL_DEVICE_FISSION)
+#define __PARAM_NAME_DEVICE_FISSION(F) \
+ F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
+ F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, VECTOR_CLASS<cl_device_partition_property_ext>) \
+ F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, VECTOR_CLASS<cl_device_partition_property_ext>) \
+ F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT , cl_uint) \
+ F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, VECTOR_CLASS<cl_device_partition_property_ext>)
+#endif // USE_CL_DEVICE_FISSION
+
+template <typename enum_type, cl_int Name>
+struct param_traits {};
+
+#define __CL_DECLARE_PARAM_TRAITS(token, param_name, T) \
+struct token; \
+template<> \
+struct param_traits<detail:: token,param_name> \
+{ \
+ enum { value = param_name }; \
+ typedef T param_type; \
+};
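+
+/* For example, the entry generated below for CL_DEVICE_NAME expands to
+ * (approximately):
+ *
+ * \code
+ * template<>
+ * struct param_traits<detail::cl_device_info, CL_DEVICE_NAME>
+ * {
+ * enum { value = CL_DEVICE_NAME };
+ * typedef STRING_CLASS param_type;
+ * };
+ * \endcode
+ *
+ * which is how getInfo<CL_DEVICE_NAME>() deduces its return type.
+ */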
+
+__PARAM_NAME_INFO_1_0(__CL_DECLARE_PARAM_TRAITS)
+#if defined(CL_VERSION_1_1)
+__PARAM_NAME_INFO_1_1(__CL_DECLARE_PARAM_TRAITS)
+#endif // CL_VERSION_1_1
+#if defined(CL_VERSION_1_2)
+__PARAM_NAME_INFO_1_2(__CL_DECLARE_PARAM_TRAITS)
+#endif // CL_VERSION_1_2
+
+#if defined(USE_CL_DEVICE_FISSION)
+__PARAM_NAME_DEVICE_FISSION(__CL_DECLARE_PARAM_TRAITS);
+#endif // USE_CL_DEVICE_FISSION
+
+#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
+__CL_DECLARE_PARAM_TRAITS(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, STRING_CLASS)
+#endif
+
+#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
+#endif
+
+#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, VECTOR_CLASS< ::size_t>)
+#endif
+#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_WIDTH_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
+#endif
+
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_WARP_SIZE_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_GPU_OVERLAP_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
+__CL_DECLARE_PARAM_TRAITS(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
+#endif
+
+// Convenience functions
+
+template <typename Func, typename T>
+inline cl_int
+getInfo(Func f, cl_uint name, T* param)
+{
+ return getInfoHelper(f, name, param, 0);
+}
+
+template <typename Func, typename Arg0>
+struct GetInfoFunctor0
+{
+ Func f_; const Arg0& arg0_;
+ cl_int operator ()(
+ cl_uint param, ::size_t size, void* value, ::size_t* size_ret)
+ { return f_(arg0_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename Arg1>
+struct GetInfoFunctor1
+{
+ Func f_; const Arg0& arg0_; const Arg1& arg1_;
+ cl_int operator ()(
+ cl_uint param, ::size_t size, void* value, ::size_t* size_ret)
+ { return f_(arg0_, arg1_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
+{
+ GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
+ return getInfoHelper(f0, name, param, 0);
+}
+
+template <typename Func, typename Arg0, typename Arg1, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
+{
+ GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
+ return getInfoHelper(f0, name, param, 0);
+}
+
+template<typename T>
+struct ReferenceHandler
+{ };
+
+#if defined(CL_VERSION_1_2)
+/**
+ * OpenCL 1.2 devices do have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+ /**
+ * Retain the device.
+ * \param device A valid device created using createSubDevices
+ * \return
+ * CL_SUCCESS if the function executed successfully.
+ * CL_INVALID_DEVICE if device was not a valid subdevice
+ * CL_OUT_OF_RESOURCES
+ * CL_OUT_OF_HOST_MEMORY
+ */
+ static cl_int retain(cl_device_id device)
+ { return ::clRetainDevice(device); }
+ /**
+ * Release the device.
+ * \param device A valid device created using createSubDevices
+ * \return
+ * CL_SUCCESS if the function executed successfully.
+ * CL_INVALID_DEVICE if device was not a valid subdevice
+ * CL_OUT_OF_RESOURCES
+ * CL_OUT_OF_HOST_MEMORY
+ */
+ static cl_int release(cl_device_id device)
+ { return ::clReleaseDevice(device); }
+};
+#else // #if defined(CL_VERSION_1_2)
+/**
+ * OpenCL 1.1 devices do not have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+ // cl_device_id does not have retain().
+ static cl_int retain(cl_device_id)
+ { return CL_SUCCESS; }
+ // cl_device_id does not have release().
+ static cl_int release(cl_device_id)
+ { return CL_SUCCESS; }
+};
+#endif // #if defined(CL_VERSION_1_2)
+
+template <>
+struct ReferenceHandler<cl_platform_id>
+{
+ // cl_platform_id does not have retain().
+ static cl_int retain(cl_platform_id)
+ { return CL_SUCCESS; }
+ // cl_platform_id does not have release().
+ static cl_int release(cl_platform_id)
+ { return CL_SUCCESS; }
+};
+
+template <>
+struct ReferenceHandler<cl_context>
+{
+ static cl_int retain(cl_context context)
+ { return ::clRetainContext(context); }
+ static cl_int release(cl_context context)
+ { return ::clReleaseContext(context); }
+};
+
+template <>
+struct ReferenceHandler<cl_command_queue>
+{
+ static cl_int retain(cl_command_queue queue)
+ { return ::clRetainCommandQueue(queue); }
+ static cl_int release(cl_command_queue queue)
+ { return ::clReleaseCommandQueue(queue); }
+};
+
+template <>
+struct ReferenceHandler<cl_mem>
+{
+ static cl_int retain(cl_mem memory)
+ { return ::clRetainMemObject(memory); }
+ static cl_int release(cl_mem memory)
+ { return ::clReleaseMemObject(memory); }
+};
+
+template <>
+struct ReferenceHandler<cl_sampler>
+{
+ static cl_int retain(cl_sampler sampler)
+ { return ::clRetainSampler(sampler); }
+ static cl_int release(cl_sampler sampler)
+ { return ::clReleaseSampler(sampler); }
+};
+
+template <>
+struct ReferenceHandler<cl_program>
+{
+ static cl_int retain(cl_program program)
+ { return ::clRetainProgram(program); }
+ static cl_int release(cl_program program)
+ { return ::clReleaseProgram(program); }
+};
+
+template <>
+struct ReferenceHandler<cl_kernel>
+{
+ static cl_int retain(cl_kernel kernel)
+ { return ::clRetainKernel(kernel); }
+ static cl_int release(cl_kernel kernel)
+ { return ::clReleaseKernel(kernel); }
+};
+
+template <>
+struct ReferenceHandler<cl_event>
+{
+ static cl_int retain(cl_event event)
+ { return ::clRetainEvent(event); }
+ static cl_int release(cl_event event)
+ { return ::clReleaseEvent(event); }
+};
+
+
+// Extracts the version number with the major version in the upper 16 bits and
+// the minor version in the lower 16 bits. Expects strings of the form
+// "OpenCL <major>.<minor> <platform-specific information>", so parsing starts
+// at index 7, immediately after the "OpenCL " prefix.
+static cl_uint getVersion(const char *versionInfo)
+{
+ int highVersion = 0;
+ int lowVersion = 0;
+ int index = 7;
+ while(versionInfo[index] != '.' ) {
+ highVersion *= 10;
+ highVersion += versionInfo[index]-'0';
+ ++index;
+ }
+ ++index;
+ while(versionInfo[index] != ' ' ) {
+ lowVersion *= 10;
+ lowVersion += versionInfo[index]-'0';
+ ++index;
+ }
+ return (highVersion << 16) | lowVersion;
+}
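+
+// For example, getVersion("OpenCL 1.2 (vendor build)") yields
+// (1 << 16) | 2, i.e. 0x10002.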
+
+static cl_uint getPlatformVersion(cl_platform_id platform)
+{
+ ::size_t size = 0;
+ clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
+ char *versionInfo = (char *) alloca(size);
+ clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, &versionInfo[0], &size);
+ return getVersion(versionInfo);
+}
+
+static cl_uint getDevicePlatformVersion(cl_device_id device)
+{
+ cl_platform_id platform;
+ clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
+ return getPlatformVersion(platform);
+}
+
+#if defined(CL_VERSION_1_2) && defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+static cl_uint getContextPlatformVersion(cl_context context)
+{
+ // The platform cannot be queried directly, so we first have to grab a
+ // device and obtain its platform
+ ::size_t size = 0;
+ clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
+ if (size == 0)
+ return 0;
+ cl_device_id *devices = (cl_device_id *) alloca(size);
+ clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices, NULL);
+ return getDevicePlatformVersion(devices[0]);
+}
+#endif // #if defined(CL_VERSION_1_2) && defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+template <typename T>
+class Wrapper
+{
+public:
+ typedef T cl_type;
+
+protected:
+ cl_type object_;
+
+public:
+ Wrapper() : object_(NULL) { }
+
+ Wrapper(const cl_type &obj) : object_(obj) { }
+
+ ~Wrapper()
+ {
+ if (object_ != NULL) { release(); }
+ }
+
+ Wrapper(const Wrapper<cl_type>& rhs)
+ {
+ object_ = rhs.object_;
+ if (object_ != NULL) { detail::errHandler(retain(), __RETAIN_ERR); }
+ }
+
+ Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
+ {
+ if (object_ != NULL) { detail::errHandler(release(), __RELEASE_ERR); }
+ object_ = rhs.object_;
+ if (object_ != NULL) { detail::errHandler(retain(), __RETAIN_ERR); }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (const cl_type &rhs)
+ {
+ if (object_ != NULL) { detail::errHandler(release(), __RELEASE_ERR); }
+ object_ = rhs;
+ return *this;
+ }
+
+ cl_type operator ()() const { return object_; }
+
+ cl_type& operator ()() { return object_; }
+
+protected:
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+ cl_int retain() const
+ {
+ return ReferenceHandler<cl_type>::retain(object_);
+ }
+
+ cl_int release() const
+ {
+ return ReferenceHandler<cl_type>::release(object_);
+ }
+};
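+
+/* Wrapper<T> gives the API classes below their RAII reference-counting
+ * semantics: copy construction retains the underlying object, assignment
+ * releases the previous value and retains the new one, and destruction
+ * releases. A sketch, given an existing cl::Context b (illustrative only):
+ *
+ * \code
+ * {
+ * cl::Context a = b; // clRetainContext(b())
+ * } // clReleaseContext when a leaves scope
+ * \endcode
+ */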
+
+template <>
+class Wrapper<cl_device_id>
+{
+public:
+ typedef cl_device_id cl_type;
+
+protected:
+ cl_type object_;
+ bool referenceCountable_;
+
+ static bool isReferenceCountable(cl_device_id device)
+ {
+ bool retVal = false;
+ if (device != NULL) {
+ int version = getDevicePlatformVersion(device);
+ // Devices are reference-countable from OpenCL 1.2 onwards, i.e. when
+ // the encoded version exceeds (1 << 16) + 1 (version 1.1).
+ if(version > ((1 << 16) + 1)) {
+ retVal = true;
+ }
+ }
+ return retVal;
+ }
+
+public:
+ Wrapper() : object_(NULL), referenceCountable_(false)
+ {
+ }
+
+ Wrapper(const cl_type &obj) : object_(obj), referenceCountable_(false)
+ {
+ referenceCountable_ = isReferenceCountable(obj);
+ }
+
+ ~Wrapper()
+ {
+ if (object_ != NULL) { release(); }
+ }
+
+ Wrapper(const Wrapper<cl_type>& rhs)
+ {
+ object_ = rhs.object_;
+ referenceCountable_ = isReferenceCountable(object_);
+ if (object_ != NULL) { detail::errHandler(retain(), __RETAIN_ERR); }
+ }
+
+ Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
+ {
+ if (object_ != NULL) { detail::errHandler(release(), __RELEASE_ERR); }
+ object_ = rhs.object_;
+ referenceCountable_ = rhs.referenceCountable_;
+ if (object_ != NULL) { detail::errHandler(retain(), __RETAIN_ERR); }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (const cl_type &rhs)
+ {
+ if (object_ != NULL) { detail::errHandler(release(), __RELEASE_ERR); }
+ object_ = rhs;
+ referenceCountable_ = isReferenceCountable(object_);
+ return *this;
+ }
+
+ cl_type operator ()() const { return object_; }
+
+ cl_type& operator ()() { return object_; }
+
+protected:
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, VECTOR_CLASS<U>*, int, typename U::cl_type);
+
+ cl_int retain() const
+ {
+ if( referenceCountable_ ) {
+ return ReferenceHandler<cl_type>::retain(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+
+ cl_int release() const
+ {
+ if( referenceCountable_ ) {
+ return ReferenceHandler<cl_type>::release(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+};
+
+} // namespace detail
+//! \endcond
+
+/*! \struct ImageFormat
+ * \brief Adds constructors and member functions for cl_image_format.
+ *
+ * \see cl_image_format
+ */
+struct ImageFormat : public cl_image_format
+{
+ //! \brief Default constructor - performs no initialization.
+ ImageFormat(){}
+
+ //! \brief Initializing constructor.
+ ImageFormat(cl_channel_order order, cl_channel_type type)
+ {
+ image_channel_order = order;
+ image_channel_data_type = type;
+ }
+
+ //! \brief Assignment operator.
+ ImageFormat& operator = (const ImageFormat& rhs)
+ {
+ if (this != &rhs) {
+ this->image_channel_data_type = rhs.image_channel_data_type;
+ this->image_channel_order = rhs.image_channel_order;
+ }
+ return *this;
+ }
+};
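+
+/* A usage sketch (illustrative only):
+ *
+ * \code
+ * cl::ImageFormat format(CL_RGBA, CL_UNORM_INT8); // 8-bit-per-channel RGBA
+ * \endcode
+ */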
+
+/*! \brief Class interface for cl_device_id.
+ *
+ * \note Copies of these objects are inexpensive, since they don't 'own'
+ * any underlying resources or data structures.
+ *
+ * \see cl_device_id
+ */
+class Device : public detail::Wrapper<cl_device_id>
+{
+public:
+ //! \brief Default constructor - initializes to NULL.
+ Device() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Copy constructor.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ Device(const Device& device) : detail::Wrapper<cl_type>(device) { }
+
+ /*! \brief Constructor from cl_device_id.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ Device(const cl_device_id &device) : detail::Wrapper<cl_type>(device) { }
+
+ /*! \brief Returns the first device on the default context.
+ *
+ * \see Context::getDefault()
+ */
+ static Device getDefault(cl_int * err = NULL);
+
+ /*! \brief Assignment operator from Device.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ Device& operator = (const Device& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_device_id.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ Device& operator = (const cl_device_id& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetDeviceInfo().
+ template <typename T>
+ cl_int getInfo(cl_device_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetDeviceInfo, object_, name, param),
+ __GET_DEVICE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetDeviceInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_device_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_device_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
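+
+ /* A usage sketch for the two getInfo() flavours above, given some
+ * cl::Device device (illustrative only):
+ *
+ * \code
+ * STRING_CLASS name;
+ * device.getInfo(CL_DEVICE_NAME, &name); // out-parameter form
+ * cl_uint units = device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>(); // by-value form
+ * \endcode
+ */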
+
+ /**
+ * CL 1.2 version
+ */
+#if defined(CL_VERSION_1_2)
+ //! \brief Wrapper for clCreateSubDevices().
+ cl_int createSubDevices(
+ const cl_device_partition_property * properties,
+ VECTOR_CLASS<Device>* devices)
+ {
+ cl_uint n = 0;
+ cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES);
+ }
+
+ cl_device_id* ids = (cl_device_id*) alloca(n * sizeof(cl_device_id));
+ err = clCreateSubDevices(object_, properties, n, ids, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES);
+ }
+
+ devices->assign(&ids[0], &ids[n]);
+ return CL_SUCCESS;
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+/**
+ * CL 1.1 version that uses device fission.
+ */
+#if defined(CL_VERSION_1_1)
+#if defined(USE_CL_DEVICE_FISSION)
+ cl_int createSubDevices(
+ const cl_device_partition_property_ext * properties,
+ VECTOR_CLASS<Device>* devices)
+ {
+ typedef CL_API_ENTRY cl_int
+ ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
+ cl_device_id /*in_device*/,
+ const cl_device_partition_property_ext * /* properties */,
+ cl_uint /*num_entries*/,
+ cl_device_id * /*out_devices*/,
+ cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
+ __INIT_CL_EXT_FCN_PTR(clCreateSubDevicesEXT);
+
+ cl_uint n = 0;
+ cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES);
+ }
+
+ cl_device_id* ids = (cl_device_id*) alloca(n * sizeof(cl_device_id));
+ err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES);
+ }
+
+ devices->assign(&ids[0], &ids[n]);
+ return CL_SUCCESS;
+ }
+#endif // #if defined(USE_CL_DEVICE_FISSION)
+#endif // #if defined(CL_VERSION_1_1)
+};
+
+/*! \brief Class interface for cl_platform_id.
+ *
+ * \note Copies of these objects are inexpensive, since they don't 'own'
+ * any underlying resources or data structures.
+ *
+ * \see cl_platform_id
+ */
+class Platform : public detail::Wrapper<cl_platform_id>
+{
+public:
+ //! \brief Default constructor - initializes to NULL.
+ Platform() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Copy constructor.
+ *
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ Platform(const Platform& platform) : detail::Wrapper<cl_type>(platform) { }
+
+ /*! \brief Constructor from cl_platform_id.
+ *
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ Platform(const cl_platform_id &platform) : detail::Wrapper<cl_type>(platform) { }
+
+ /*! \brief Assignment operator from Platform.
+ *
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ Platform& operator = (const Platform& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_platform_id.
+ *
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ Platform& operator = (const cl_platform_id& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetPlatformInfo().
+ cl_int getInfo(cl_platform_info name, STRING_CLASS* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetPlatformInfo, object_, name, param),
+ __GET_PLATFORM_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetPlatformInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_platform_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_platform_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Gets a list of devices for this platform.
+ *
+ * Wraps clGetDeviceIDs().
+ */
+ cl_int getDevices(
+ cl_device_type type,
+ VECTOR_CLASS<Device>* devices) const
+ {
+ cl_uint n = 0;
+ if( devices == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
+ }
+ cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ cl_device_id* ids = (cl_device_id*) alloca(n * sizeof(cl_device_id));
+ err = ::clGetDeviceIDs(object_, type, n, ids, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ devices->assign(&ids[0], &ids[n]);
+ return CL_SUCCESS;
+ }
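+
+ /* A usage sketch, assuming an OpenCL platform with at least one GPU
+ * device is installed (illustrative only):
+ *
+ * \code
+ * cl::Platform platform = cl::Platform::get(); // first available platform
+ * VECTOR_CLASS<cl::Device> devices;
+ * cl_int err = platform.getDevices(CL_DEVICE_TYPE_GPU, &devices);
+ * \endcode
+ */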
+
+#if defined(USE_DX_INTEROP)
+ /*! \brief Get the list of available D3D10 devices.
+ *
+ * \param d3d_device_source specifies how \a d3d_object should be
+ * interpreted (as a D3D10 device or as a DXGI adapter).
+ *
+ * \param d3d_object the Direct3D 10 object whose associated OpenCL
+ * devices are queried.
+ *
+ * \param d3d_device_set specifies whether the preferred or all matching
+ * devices for \a d3d_object are returned.
+ *
+ * \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device
+ * values returned in devices can be used to identify a specific OpenCL
+ * device. If \a devices argument is NULL, this argument is ignored.
+ *
+ * \return One of the following values:
+ * - CL_SUCCESS if the function is executed successfully.
+ *
+ * The application can query specific capabilities of the OpenCL device(s)
+ * returned by cl::getDevices. This can be used by the application to
+ * determine which device(s) to use.
+ *
+ * \note In the case that exceptions are enabled and a return value
+ * other than CL_SUCCESS is generated, a cl::Error exception is
+ * thrown.
+ */
+ cl_int getDevices(
+ cl_d3d10_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d10_device_set_khr d3d_device_set,
+ VECTOR_CLASS<Device>* devices) const
+ {
+ typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
+ cl_platform_id platform,
+ cl_d3d10_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d10_device_set_khr d3d_device_set,
+ cl_uint num_entries,
+ cl_device_id * devices,
+ cl_uint* num_devices);
+
+ if( devices == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
+ }
+
+ static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
+ __INIT_CL_EXT_FCN_PTR_PLATFORM(object_, clGetDeviceIDsFromD3D10KHR);
+
+ cl_uint n = 0;
+ cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
+ object_,
+ d3d_device_source,
+ d3d_object,
+ d3d_device_set,
+ 0,
+ NULL,
+ &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ cl_device_id* ids = (cl_device_id*) alloca(n * sizeof(cl_device_id));
+ err = pfn_clGetDeviceIDsFromD3D10KHR(
+ object_,
+ d3d_device_source,
+ d3d_object,
+ d3d_device_set,
+ n,
+ ids,
+ NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ devices->assign(&ids[0], &ids[n]);
+ return CL_SUCCESS;
+ }
+#endif
+
+ /*! \brief Gets a list of available platforms.
+ *
+ * Wraps clGetPlatformIDs().
+ */
+ static cl_int get(
+ VECTOR_CLASS<Platform>* platforms)
+ {
+ cl_uint n = 0;
+
+ if( platforms == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
+ }
+
+ cl_int err = ::clGetPlatformIDs(0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ cl_platform_id* ids = (cl_platform_id*) alloca(
+ n * sizeof(cl_platform_id));
+ err = ::clGetPlatformIDs(n, ids, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ platforms->assign(&ids[0], &ids[n]);
+ return CL_SUCCESS;
+ }
+
+ /*! \brief Gets the first available platform.
+ *
+ * Wraps clGetPlatformIDs(), returning the first result.
+ */
+ static cl_int get(
+ Platform * platform)
+ {
+ cl_uint n = 0;
+
+ if( platform == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
+ }
+
+ cl_int err = ::clGetPlatformIDs(0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ cl_platform_id* ids = (cl_platform_id*) alloca(
+ n * sizeof(cl_platform_id));
+ err = ::clGetPlatformIDs(n, ids, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ *platform = ids[0];
+ return CL_SUCCESS;
+ }
+
+ /*! \brief Gets the first available platform, returning it by value.
+ *
+ * Wraps clGetPlatformIDs(), returning the first result.
+ */
+ static Platform get(
+ cl_int * errResult = NULL)
+ {
+ Platform platform;
+ cl_uint n = 0;
+ cl_int err = ::clGetPlatformIDs(0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ if (errResult != NULL) {
+ *errResult = err;
+ }
+ // No valid platform can be returned on failure.
+ return platform;
+ }
+
+ cl_platform_id* ids = (cl_platform_id*) alloca(
+ n * sizeof(cl_platform_id));
+ err = ::clGetPlatformIDs(n, ids, NULL);
+
+ if (err != CL_SUCCESS) {
+ detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ if (errResult != NULL) {
+ *errResult = err;
+ }
+
+ return ids[0];
+ }
+
+ static Platform getDefault(
+ cl_int *errResult = NULL )
+ {
+ return get(errResult);
+ }
+
+
+#if defined(CL_VERSION_1_2)
+ //! \brief Wrapper for clUnloadPlatformCompiler().
+ cl_int
+ unloadCompiler()
+ {
+ return ::clUnloadPlatformCompiler(object_);
+ }
+#endif // #if defined(CL_VERSION_1_2)
+}; // class Platform
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+/**
+ * Unload the OpenCL compiler.
+ * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead.
+ */
+inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
+UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+inline cl_int
+UnloadCompiler()
+{
+ return ::clUnloadCompiler();
+}
+#endif // deprecated 1.1 APIs
+
+/*! \brief Class interface for cl_context.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_context as the original. For details, see
+ * clRetainContext() and clReleaseContext().
+ *
+ * \see cl_context
+ */
+class Context
+ : public detail::Wrapper<cl_context>
+{
+private:
+ static volatile int default_initialized_;
+ static Context default_;
+ static volatile cl_int default_error_;
+public:
+ /*! \brief Destructor.
+ *
+ * This calls clReleaseContext() on the value held by this instance.
+ */
+ ~Context() { }
+
+ /*! \brief Constructs a context including a list of specified devices.
+ *
+ * Wraps clCreateContext().
+ */
+ Context(
+ const VECTOR_CLASS<Device>& devices,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ ::size_t,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ ::size_t numDevices = devices.size();
+ cl_device_id* deviceIDs = (cl_device_id*) alloca(numDevices * sizeof(cl_device_id));
+ for( ::size_t deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ object_ = ::clCreateContext(
+ properties, (cl_uint) numDevices,
+ deviceIDs,
+ notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Context(
+ const Device& device,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ ::size_t,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ cl_device_id deviceID = device();
+
+ object_ = ::clCreateContext(
+ properties, 1,
+ &deviceID,
+ notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Constructs a context including all or a subset of devices of a specified type.
+ *
+ * Wraps clCreateContextFromType().
+ */
+ Context(
+ cl_device_type type,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ ::size_t,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+#if !defined(__APPLE__) || !defined(__MACOS)
+ cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
+
+ if (properties == NULL) {
+ // Get a valid platform ID as we cannot send in a blank one
+ VECTOR_CLASS<Platform> platforms;
+ error = Platform::get(&platforms);
+ if (error != CL_SUCCESS) {
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+ // Check the platforms we found for a device of our specified type
+ cl_context_properties platform_id = 0;
+ for (unsigned int i = 0; i < platforms.size(); i++) {
+
+ VECTOR_CLASS<Device> devices;
+
+#if defined(__CL_ENABLE_EXCEPTIONS)
+ try {
+#endif
+
+ error = platforms[i].getDevices(type, &devices);
+
+#if defined(__CL_ENABLE_EXCEPTIONS)
+        } catch (Error&) {}
+    // Swallow the exception when exceptions are enabled: we don't want to give
+    // up just because the first platform has no devices of the requested type.
+    // The error check below reports anything else, and can throw if needed.
+#endif
+
+ // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
+ if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ if (devices.size() > 0) {
+ platform_id = (cl_context_properties)platforms[i]();
+ break;
+ }
+ }
+
+ if (platform_id == 0) {
+ detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = CL_DEVICE_NOT_FOUND;
+ }
+ return;
+ }
+
+ prop[1] = platform_id;
+ properties = &prop[0];
+ }
+#endif
+ object_ = ::clCreateContextFromType(
+ properties, type, notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
+ *
+ * \note All calls to this function return the same cl_context as the first.
+ */
+ static Context getDefault(cl_int * err = NULL)
+ {
+ int state = detail::compare_exchange(
+ &default_initialized_,
+ __DEFAULT_BEING_INITIALIZED, __DEFAULT_NOT_INITIALIZED);
+
+ if (state & __DEFAULT_INITIALIZED) {
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ if (state & __DEFAULT_BEING_INITIALIZED) {
+ // Assume writes will propagate eventually...
+ while(default_initialized_ != __DEFAULT_INITIALIZED) {
+ detail::fence();
+ }
+
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ cl_int error;
+ default_ = Context(
+ CL_DEVICE_TYPE_DEFAULT,
+ NULL,
+ NULL,
+ NULL,
+ &error);
+
+ detail::fence();
+
+ default_error_ = error;
+ // Assume writes will propagate eventually...
+ default_initialized_ = __DEFAULT_INITIALIZED;
+
+ detail::fence();
+
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Context() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Copy constructor.
+ *
+ * This calls clRetainContext() on the parameter's cl_context.
+ */
+ Context(const Context& context) : detail::Wrapper<cl_type>(context) { }
+
+ /*! \brief Constructor from cl_context - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_context
+ * into the new Context object.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Context(const cl_context& context) : detail::Wrapper<cl_type>(context) { }
+
+ /*! \brief Assignment operator from Context.
+ *
+ * This calls clRetainContext() on the parameter and clReleaseContext() on
+ * the previous value held by this instance.
+ */
+ Context& operator = (const Context& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_context - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseContext() on the value previously held by this instance.
+ */
+ Context& operator = (const cl_context& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetContextInfo().
+ template <typename T>
+ cl_int getInfo(cl_context_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetContextInfo, object_, name, param),
+ __GET_CONTEXT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetContextInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_context_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_context_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Gets a list of supported image formats.
+ *
+ * Wraps clGetSupportedImageFormats().
+ */
+ cl_int getSupportedImageFormats(
+ cl_mem_flags flags,
+ cl_mem_object_type type,
+ VECTOR_CLASS<ImageFormat>* formats) const
+ {
+ cl_uint numEntries;
+ cl_int err = ::clGetSupportedImageFormats(
+ object_,
+ flags,
+ type,
+ 0,
+ NULL,
+ &numEntries);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
+ }
+
+ ImageFormat* value = (ImageFormat*)
+ alloca(numEntries * sizeof(ImageFormat));
+ err = ::clGetSupportedImageFormats(
+ object_,
+ flags,
+ type,
+ numEntries,
+ (cl_image_format*) value,
+ NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
+ }
+
+ formats->assign(&value[0], &value[numEntries]);
+ return CL_SUCCESS;
+ }
+};
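+
+/* Usage sketch (added for illustration; not part of the original header):
+ * creating a context for all GPU devices on the first platform. Assumes at
+ * least one platform with a GPU device is present.
+ *
+ *   VECTOR_CLASS<cl::Platform> platforms;
+ *   cl::Platform::get(&platforms);
+ *   VECTOR_CLASS<cl::Device> devices;
+ *   platforms[0].getDevices(CL_DEVICE_TYPE_GPU, &devices);
+ *   cl_int err;
+ *   cl::Context context(devices, NULL, NULL, NULL, &err);
+ */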
+
+inline Device Device::getDefault(cl_int * err)
+{
+ cl_int error;
+ Device device;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+
+ if (error != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ else {
+ device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+ if (err != NULL) {
+ *err = CL_SUCCESS;
+ }
+ }
+
+ return device;
+}
+
+
+#ifdef _WIN32
+__declspec(selectany) volatile int Context::default_initialized_ = __DEFAULT_NOT_INITIALIZED;
+__declspec(selectany) Context Context::default_;
+__declspec(selectany) volatile cl_int Context::default_error_ = CL_SUCCESS;
+#else
+__attribute__((weak)) volatile int Context::default_initialized_ = __DEFAULT_NOT_INITIALIZED;
+__attribute__((weak)) Context Context::default_;
+__attribute__((weak)) volatile cl_int Context::default_error_ = CL_SUCCESS;
+#endif
+
+/*! \brief Class interface for cl_event.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_event as the original. For details, see
+ * clRetainEvent() and clReleaseEvent().
+ *
+ * \see cl_event
+ */
+class Event : public detail::Wrapper<cl_event>
+{
+public:
+ /*! \brief Destructor.
+ *
+ * This calls clReleaseEvent() on the value held by this instance.
+ */
+ ~Event() { }
+
+ //! \brief Default constructor - initializes to NULL.
+ Event() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Copy constructor.
+ *
+ * This calls clRetainEvent() on the parameter's cl_event.
+ */
+ Event(const Event& event) : detail::Wrapper<cl_type>(event) { }
+
+ /*! \brief Constructor from cl_event - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_event
+ * into the new Event object.
+ */
+ Event(const cl_event& event) : detail::Wrapper<cl_type>(event) { }
+
+ /*! \brief Assignment operator from cl_event - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseEvent() on the value previously held by this instance.
+ */
+ Event& operator = (const Event& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_event.
+ *
+ * This calls clRetainEvent() on the parameter and clReleaseEvent() on
+ * the previous value held by this instance.
+ */
+ Event& operator = (const cl_event& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetEventInfo().
+ template <typename T>
+ cl_int getInfo(cl_event_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetEventInfo, object_, name, param),
+ __GET_EVENT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetEventInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_event_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_event_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ //! \brief Wrapper for clGetEventProfilingInfo().
+ template <typename T>
+ cl_int getProfilingInfo(cl_profiling_info name, T* param) const
+ {
+ return detail::errHandler(detail::getInfo(
+ &::clGetEventProfilingInfo, object_, name, param),
+ __GET_EVENT_PROFILE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetEventProfilingInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_profiling_info, name>::param_type
+ getProfilingInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_profiling_info, name>::param_type param;
+ cl_int result = getProfilingInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Blocks the calling thread until this event completes.
+ *
+ * Wraps clWaitForEvents().
+ */
+ cl_int wait() const
+ {
+ return detail::errHandler(
+ ::clWaitForEvents(1, &object_),
+ __WAIT_FOR_EVENTS_ERR);
+ }
+
+#if defined(CL_VERSION_1_1)
+ /*! \brief Registers a user callback function for a specific command execution status.
+ *
+ * Wraps clSetEventCallback().
+ */
+ cl_int setCallback(
+ cl_int type,
+ void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
+ void * user_data = NULL)
+ {
+ return detail::errHandler(
+ ::clSetEventCallback(
+ object_,
+ type,
+ pfn_notify,
+ user_data),
+ __SET_EVENT_CALLBACK_ERR);
+ }
+#endif
+
+ /*! \brief Blocks the calling thread until every event specified is complete.
+ *
+ * Wraps clWaitForEvents().
+ */
+ static cl_int
+ waitForEvents(const VECTOR_CLASS<Event>& events)
+ {
+ return detail::errHandler(
+ ::clWaitForEvents(
+ (cl_uint) events.size(), (cl_event*)&events.front()),
+ __WAIT_FOR_EVENTS_ERR);
+ }
+};
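+
+/* Usage sketch (illustrative, not part of the original header): blocking on an
+ * event and reading profiling data. Assumes the owning queue was created with
+ * CL_QUEUE_PROFILING_ENABLE and `event` tracks an enqueued command.
+ *
+ *   event.wait();
+ *   cl_ulong start = event.getProfilingInfo<CL_PROFILING_COMMAND_START>();
+ *   cl_ulong end   = event.getProfilingInfo<CL_PROFILING_COMMAND_END>();
+ *   cl_ulong elapsedNs = end - start;
+ */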
+
+#if defined(CL_VERSION_1_1)
+/*! \brief Class interface for user events (a subset of cl_events).
+ *
+ * See Event for details about copy semantics, etc.
+ */
+class UserEvent : public Event
+{
+public:
+ /*! \brief Constructs a user event on a given context.
+ *
+ * Wraps clCreateUserEvent().
+ */
+ UserEvent(
+ const Context& context,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateUserEvent(
+ context(),
+ &error);
+
+ detail::errHandler(error, __CREATE_USER_EVENT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ UserEvent() : Event() { }
+
+ //! \brief Copy constructor - performs shallow copy.
+ UserEvent(const UserEvent& event) : Event(event) { }
+
+ //! \brief Assignment Operator - performs shallow copy.
+ UserEvent& operator = (const UserEvent& rhs)
+ {
+ if (this != &rhs) {
+ Event::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Sets the execution status of a user event object.
+ *
+ * Wraps clSetUserEventStatus().
+ */
+ cl_int setStatus(cl_int status)
+ {
+ return detail::errHandler(
+ ::clSetUserEventStatus(object_,status),
+ __SET_USER_EVENT_STATUS_ERR);
+ }
+};
+#endif
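+
+/* Usage sketch (illustrative; requires CL_VERSION_1_1): gating enqueued
+ * commands on a host-controlled user event.
+ *
+ *   cl::UserEvent gate(context);
+ *   VECTOR_CLASS<cl::Event> waitList;
+ *   waitList.push_back(gate);
+ *   // ... enqueue commands that take &waitList as their wait list ...
+ *   gate.setStatus(CL_COMPLETE);   // releases the gated commands
+ */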
+
+/*! \brief Blocks the calling thread until every event specified is complete.
+ *
+ * Wraps clWaitForEvents().
+ */
+inline static cl_int
+WaitForEvents(const VECTOR_CLASS<Event>& events)
+{
+ return detail::errHandler(
+ ::clWaitForEvents(
+ (cl_uint) events.size(), (cl_event*)&events.front()),
+ __WAIT_FOR_EVENTS_ERR);
+}
+
+/*! \brief Class interface for cl_mem.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_mem as the original. For details, see
+ * clRetainMemObject() and clReleaseMemObject().
+ *
+ * \see cl_mem
+ */
+class Memory : public detail::Wrapper<cl_mem>
+{
+public:
+
+ /*! \brief Destructor.
+ *
+ * This calls clReleaseMemObject() on the value held by this instance.
+ */
+ ~Memory() {}
+
+ //! \brief Default constructor - initializes to NULL.
+ Memory() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * This calls clRetainMemObject() on the parameter's cl_mem.
+ */
+ Memory(const Memory& memory) : detail::Wrapper<cl_type>(memory) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_mem
+ * into the new Memory object.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Memory(const cl_mem& memory) : detail::Wrapper<cl_type>(memory) { }
+
+ /*! \brief Assignment operator from Memory.
+ *
+ * This calls clRetainMemObject() on the parameter and clReleaseMemObject()
+ * on the previous value held by this instance.
+ */
+ Memory& operator = (const Memory& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_mem - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseMemObject() on the value previously held by this instance.
+ */
+ Memory& operator = (const cl_mem& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetMemObjectInfo().
+ template <typename T>
+ cl_int getInfo(cl_mem_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
+ __GET_MEM_OBJECT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_mem_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_mem_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+#if defined(CL_VERSION_1_1)
+ /*! \brief Registers a callback function to be called when the memory object
+ * is no longer needed.
+ *
+ * Wraps clSetMemObjectDestructorCallback().
+ *
+ * Repeated calls to this function, for a given cl_mem value, will append
+     * to the list of functions called (in reverse order) when the memory object's
+ * resources are freed and the memory object is deleted.
+ *
+ * \note
+ * The registered callbacks are associated with the underlying cl_mem
+ * value - not the Memory class instance.
+ */
+ cl_int setDestructorCallback(
+ void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
+ void * user_data = NULL)
+ {
+ return detail::errHandler(
+ ::clSetMemObjectDestructorCallback(
+ object_,
+ pfn_notify,
+ user_data),
+ __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
+ }
+#endif
+
+};
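+
+/* Usage sketch (illustrative; the callback requires CL_VERSION_1_1): releasing
+ * a host allocation once the underlying cl_mem is destroyed. `freeHostBlock`
+ * and `hostBlock` are hypothetical names.
+ *
+ *   void CL_CALLBACK freeHostBlock(cl_mem, void* user_data)
+ *   {
+ *       free(user_data);
+ *   }
+ *   ...
+ *   memory.setDestructorCallback(&freeHostBlock, hostBlock);
+ */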
+
+// Pre-declare copy functions
+class Buffer;
+template< typename IteratorType >
+cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
+template< typename IteratorType >
+cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
+template< typename IteratorType >
+cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
+template< typename IteratorType >
+cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
+
+
+/*! \brief Class interface for Buffer Memory Objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Buffer : public Memory
+{
+public:
+
+ /*! \brief Constructs a Buffer in a specified context.
+ *
+ * Wraps clCreateBuffer().
+ *
+ * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
+ * specified. Note alignment & exclusivity requirements.
+ */
+ Buffer(
+ const Context& context,
+ cl_mem_flags flags,
+ ::size_t size,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Constructs a Buffer in the default context.
+ *
+ * Wraps clCreateBuffer().
+ *
+ * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
+ * specified. Note alignment & exclusivity requirements.
+ *
+ * \see Context::getDefault()
+ */
+ Buffer(
+ cl_mem_flags flags,
+ ::size_t size,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(err);
+
+ object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*!
+ * \brief Construct a Buffer from a host container via iterators.
+ * IteratorType must be random access.
+ * If useHostPtr is specified, the iterators must refer to contiguous storage.
+ */
+ template< typename IteratorType >
+ Buffer(
+ IteratorType startIterator,
+ IteratorType endIterator,
+ bool readOnly,
+ bool useHostPtr = false,
+ cl_int* err = NULL)
+ {
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ cl_mem_flags flags = 0;
+ if( readOnly ) {
+ flags |= CL_MEM_READ_ONLY;
+ }
+ else {
+ flags |= CL_MEM_READ_WRITE;
+ }
+ if( useHostPtr ) {
+ flags |= CL_MEM_USE_HOST_PTR;
+ }
+
+ ::size_t size = sizeof(DataType)*(endIterator - startIterator);
+
+ Context context = Context::getDefault(err);
+
+ if( useHostPtr ) {
+ object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
+ } else {
+ object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
+ }
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ if( !useHostPtr ) {
+ error = cl::copy(startIterator, endIterator, *this);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ }
+
+ /*!
+ * \brief Construct a Buffer from a host container via iterators using a specified context.
+ * IteratorType must be random access.
+ * If useHostPtr is specified, the iterators must refer to contiguous storage.
+ */
+ template< typename IteratorType >
+ Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
+ bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
+
+ //! \brief Default constructor - initializes to NULL.
+ Buffer() : Memory() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Buffer(const Buffer& buffer) : Memory(buffer) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Buffer(const cl_mem& buffer) : Memory(buffer) { }
+
+ /*! \brief Assignment from Buffer - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Buffer& operator = (const Buffer& rhs)
+ {
+ if (this != &rhs) {
+ Memory::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Buffer& operator = (const cl_mem& rhs)
+ {
+ Memory::operator=(rhs);
+ return *this;
+ }
+
+#if defined(CL_VERSION_1_1)
+    /*! \brief Creates a new sub-buffer object from this buffer.
+ *
+ * Wraps clCreateSubBuffer().
+ */
+ Buffer createSubBuffer(
+ cl_mem_flags flags,
+ cl_buffer_create_type buffer_create_type,
+ const void * buffer_create_info,
+ cl_int * err = NULL)
+ {
+ Buffer result;
+ cl_int error;
+ result.object_ = ::clCreateSubBuffer(
+ object_,
+ flags,
+ buffer_create_type,
+ buffer_create_info,
+ &error);
+
+ detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ return result;
+ }
+#endif
+};
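+
+/* Usage sketch (illustrative): building a read-only Buffer from a std::vector
+ * via the iterator constructor. Because useHostPtr is false, the data is
+ * copied into the buffer through the default command queue.
+ *
+ *   std::vector<float> host(1024, 0.0f);
+ *   cl_int err;
+ *   cl::Buffer buffer(host.begin(), host.end(), true, false, &err);
+ */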
+
+#if defined (USE_DX_INTEROP)
+/*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer objects.
+ *
+ * This is provided to facilitate interoperability with Direct3D.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferD3D10 : public Buffer
+{
+public:
+ typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
+ cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
+ cl_int* errcode_ret);
+
+ /*! \brief Constructs a BufferD3D10, in a specified context, from a
+ * given ID3D10Buffer.
+ *
+ * Wraps clCreateFromD3D10BufferKHR().
+ */
+ BufferD3D10(
+ const Context& context,
+ cl_mem_flags flags,
+ ID3D10Buffer* bufobj,
+ cl_int * err = NULL)
+ {
+ static PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR = NULL;
+
+#if defined(CL_VERSION_1_2)
+        VECTOR_CLASS<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
+        cl_platform_id platform = NULL;
+        for( ::size_t i = 0; i < props.size(); ++i ) {
+            if( props[i] == CL_CONTEXT_PLATFORM ) {
+                platform = (cl_platform_id)props[i+1];
+            }
+        }
+        __INIT_CL_EXT_FCN_PTR_PLATFORM(platform, clCreateFromD3D10BufferKHR);
+#elif defined(CL_VERSION_1_1)
+        __INIT_CL_EXT_FCN_PTR(clCreateFromD3D10BufferKHR);
+#endif
+
+ cl_int error;
+ object_ = pfn_clCreateFromD3D10BufferKHR(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferD3D10() : Buffer() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferD3D10(const BufferD3D10& buffer) : Buffer(buffer) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS BufferD3D10(const cl_mem& buffer) : Buffer(buffer) { }
+
+ /*! \brief Assignment from BufferD3D10 - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferD3D10& operator = (const BufferD3D10& rhs)
+ {
+ if (this != &rhs) {
+ Buffer::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferD3D10& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+};
+#endif
+
+/*! \brief Class interface for GL Buffer Memory Objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferGL : public Buffer
+{
+public:
+ /*! \brief Constructs a BufferGL in a specified context, from a given
+ * GL buffer.
+ *
+ * Wraps clCreateFromGLBuffer().
+ */
+ BufferGL(
+ const Context& context,
+ cl_mem_flags flags,
+ GLuint bufobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLBuffer(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferGL() : Buffer() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferGL(const BufferGL& buffer) : Buffer(buffer) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS BufferGL(const cl_mem& buffer) : Buffer(buffer) { }
+
+ /*! \brief Assignment from BufferGL - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferGL& operator = (const BufferGL& rhs)
+ {
+ if (this != &rhs) {
+ Buffer::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferGL& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetGLObjectInfo().
+ cl_int getObjectInfo(
+ cl_gl_object_type *type,
+ GLuint * gl_object_name)
+ {
+ return detail::errHandler(
+ ::clGetGLObjectInfo(object_,type,gl_object_name),
+ __GET_GL_OBJECT_INFO_ERR);
+ }
+};
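+
+/* Usage sketch (illustrative): sharing a GL buffer object with OpenCL. Assumes
+ * `context` was created with GL-sharing properties and `vbo` is a valid GLuint
+ * buffer name; the object must be acquired and released around CL use.
+ *
+ *   cl_int err;
+ *   cl::BufferGL clVbo(context, CL_MEM_READ_WRITE, vbo, &err);
+ */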
+
+/*! \brief Class interface for GL Render Buffer Memory Objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferRenderGL : public Buffer
+{
+public:
+ /*! \brief Constructs a BufferRenderGL in a specified context, from a given
+ * GL Renderbuffer.
+ *
+ * Wraps clCreateFromGLRenderbuffer().
+ */
+ BufferRenderGL(
+ const Context& context,
+ cl_mem_flags flags,
+ GLuint bufobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLRenderbuffer(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferRenderGL() : Buffer() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+    BufferRenderGL(const BufferRenderGL& buffer) : Buffer(buffer) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS BufferRenderGL(const cl_mem& buffer) : Buffer(buffer) { }
+
+    /*! \brief Assignment from BufferRenderGL - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferRenderGL& operator = (const BufferRenderGL& rhs)
+ {
+ if (this != &rhs) {
+ Buffer::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferRenderGL& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetGLObjectInfo().
+ cl_int getObjectInfo(
+ cl_gl_object_type *type,
+ GLuint * gl_object_name)
+ {
+ return detail::errHandler(
+ ::clGetGLObjectInfo(object_,type,gl_object_name),
+ __GET_GL_OBJECT_INFO_ERR);
+ }
+};
+
+/*! \brief C++ base class for Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image : public Memory
+{
+protected:
+ //! \brief Default constructor - initializes to NULL.
+ Image() : Memory() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image(const Image& image) : Memory(image) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image(const cl_mem& image) : Memory(image) { }
+
+ /*! \brief Assignment from Image - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image& operator = (const Image& rhs)
+ {
+ if (this != &rhs) {
+ Memory::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image& operator = (const cl_mem& rhs)
+ {
+ Memory::operator=(rhs);
+ return *this;
+ }
+
+public:
+ //! \brief Wrapper for clGetImageInfo().
+ template <typename T>
+ cl_int getImageInfo(cl_image_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetImageInfo, object_, name, param),
+ __GET_IMAGE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetImageInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_image_info, name>::param_type
+ getImageInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_image_info, name>::param_type param;
+ cl_int result = getImageInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+};
+
+#if defined(CL_VERSION_1_2)
+/*! \brief Class interface for 1D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image1D : public Image
+{
+public:
+ /*! \brief Constructs a 1D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image1D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t width,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D,
+ width,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image1D() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image1D(const Image1D& image1D) : Image(image1D) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image1D(const cl_mem& image1D) : Image(image1D) { }
+
+ /*! \brief Assignment from Image1D - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image1D& operator = (const Image1D& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image1D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+
+/*! \class Image1DBuffer
+ * \brief Image interface for 1D buffer images.
+ */
+class Image1DBuffer : public Image
+{
+public:
+ Image1DBuffer(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t width,
+ const Buffer &buffer,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D_BUFFER,
+ width,
+ 0, 0, 0, 0, 0, 0, 0,
+ buffer()
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ NULL,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image1DBuffer() { }
+
+ Image1DBuffer(const Image1DBuffer& image1D) : Image(image1D) { }
+
+ __CL_EXPLICIT_CONSTRUCTORS Image1DBuffer(const cl_mem& image1D) : Image(image1D) { }
+
+ Image1DBuffer& operator = (const Image1DBuffer& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ Image1DBuffer& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+
+/*! \class Image1DArray
+ * \brief Image interface for arrays of 1D images.
+ */
+class Image1DArray : public Image
+{
+public:
+ Image1DArray(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t arraySize,
+ ::size_t width,
+ ::size_t rowPitch,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D_ARRAY,
+ width,
+ 0, 0, // height, depth (unused)
+ arraySize,
+ rowPitch,
+ 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image1DArray() { }
+
+ Image1DArray(const Image1DArray& imageArray) : Image(imageArray) { }
+
+ __CL_EXPLICIT_CONSTRUCTORS Image1DArray(const cl_mem& imageArray) : Image(imageArray) { }
+
+ Image1DArray& operator = (const Image1DArray& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ Image1DArray& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+#endif // #if defined(CL_VERSION_1_2)
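+
+/* Usage sketch (illustrative; requires CL_VERSION_1_2): a single-channel float
+ * 1D image of 4096 texels in an existing context.
+ *
+ *   cl_int err;
+ *   cl::Image1D img(context,
+ *                   CL_MEM_READ_ONLY,
+ *                   cl::ImageFormat(CL_R, CL_FLOAT),
+ *                   4096,
+ *                   NULL,
+ *                   &err);
+ */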
+
+
+/*! \brief Class interface for 2D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image2D : public Image
+{
+public:
+    /*! \brief Constructs a 2D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image2D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t width,
+ ::size_t height,
+ ::size_t row_pitch = 0,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ bool useCreateImage;
+
+#if defined(CL_VERSION_1_2) && defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ // Run-time decision based on the actual platform
+ {
+ cl_uint version = detail::getContextPlatformVersion(context());
+ useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
+ }
+#elif defined(CL_VERSION_1_2)
+ useCreateImage = true;
+#else
+ useCreateImage = false;
+#endif
+
+#if defined(CL_VERSION_1_2)
+ if (useCreateImage)
+ {
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D,
+ width,
+ height,
+ 0, 0, // depth, array size (unused)
+ row_pitch,
+ 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // #if defined(CL_VERSION_1_2)
+#if !defined(CL_VERSION_1_2) || defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ if (!useCreateImage)
+ {
+ object_ = ::clCreateImage2D(
+ context(), flags,&format, width, height, row_pitch, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_IMAGE2D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // #if !defined(CL_VERSION_1_2) || defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image2D() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2D(const Image2D& image2D) : Image(image2D) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image2D(const cl_mem& image2D) : Image(image2D) { }
+
+ /*! \brief Assignment from Image2D - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2D& operator = (const Image2D& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
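+
+/* Usage sketch (illustrative): a 640x480 RGBA 8-bit 2D image. As shown above,
+ * the constructor selects clCreateImage() or the deprecated clCreateImage2D()
+ * depending on the platform version.
+ *
+ *   cl_int err;
+ *   cl::Image2D img(context,
+ *                   CL_MEM_READ_WRITE,
+ *                   cl::ImageFormat(CL_RGBA, CL_UNORM_INT8),
+ *                   640, 480,
+ *                   0,      // row pitch: let the runtime choose
+ *                   NULL,
+ *                   &err);
+ */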
+
+
+#if !defined(CL_VERSION_1_2)
+/*! \brief Class interface for GL 2D Image Memory objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ * \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
+ */
+class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED : public Image2D
+{
+public:
+ /*! \brief Constructs an Image2DGL in a specified context, from a given
+ * GL Texture.
+ *
+ * Wraps clCreateFromGLTexture2D().
+ */
+ Image2DGL(
+ const Context& context,
+ cl_mem_flags flags,
+ GLenum target,
+ GLint miplevel,
+ GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture2D(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image2DGL() : Image2D() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2DGL(const Image2DGL& image) : Image2D(image) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image2DGL(const cl_mem& image) : Image2D(image) { }
+
+ /*! \brief Assignment from Image2DGL - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2DGL& operator = (const Image2DGL& rhs)
+ {
+ if (this != &rhs) {
+ Image2D::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2DGL& operator = (const cl_mem& rhs)
+ {
+ Image2D::operator=(rhs);
+ return *this;
+ }
+};
+#endif // #if !defined(CL_VERSION_1_2)
+
+#if defined(CL_VERSION_1_2)
+/*! \class Image2DArray
+ * \brief Image interface for arrays of 2D images.
+ */
+class Image2DArray : public Image
+{
+public:
+ Image2DArray(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t arraySize,
+ ::size_t width,
+ ::size_t height,
+ ::size_t rowPitch,
+ ::size_t slicePitch,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D_ARRAY,
+ width,
+ height,
+ 0, // depth (unused)
+ arraySize,
+ rowPitch,
+ slicePitch,
+ 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image2DArray() { }
+
+ Image2DArray(const Image2DArray& imageArray) : Image(imageArray) { }
+
+ __CL_EXPLICIT_CONSTRUCTORS Image2DArray(const cl_mem& imageArray) : Image(imageArray) { }
+
+ Image2DArray& operator = (const Image2DArray& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ Image2DArray& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+#endif // #if defined(CL_VERSION_1_2)
+
+/*! \brief Class interface for 3D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image3D : public Image
+{
+public:
+ /*! \brief Constructs a 3D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image3D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ ::size_t width,
+ ::size_t height,
+ ::size_t depth,
+ ::size_t row_pitch = 0,
+ ::size_t slice_pitch = 0,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ bool useCreateImage;
+
+#if defined(CL_VERSION_1_2) && defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ // Run-time decision based on the actual platform
+ {
+ cl_uint version = detail::getContextPlatformVersion(context());
+ useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
+ }
+#elif defined(CL_VERSION_1_2)
+ useCreateImage = true;
+#else
+ useCreateImage = false;
+#endif
+
+#if defined(CL_VERSION_1_2)
+ if (useCreateImage)
+ {
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE3D,
+ width,
+ height,
+ depth,
+ 0, // array size (unused)
+ row_pitch,
+ slice_pitch,
+ 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // #if defined(CL_VERSION_1_2)
+#if !defined(CL_VERSION_1_2) || defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ if (!useCreateImage)
+ {
+ object_ = ::clCreateImage3D(
+ context(), flags, &format, width, height, depth, row_pitch,
+ slice_pitch, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_IMAGE3D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // #if !defined(CL_VERSION_1_2) || defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image3D() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3D(const Image3D& image3D) : Image(image3D) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image3D(const cl_mem& image3D) : Image(image3D) { }
+
+ /*! \brief Assignment from Image3D - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3D& operator = (const Image3D& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+
+#if !defined(CL_VERSION_1_2)
+/*! \brief Class interface for GL 3D Image Memory objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image3DGL : public Image3D
+{
+public:
+ /*! \brief Constructs an Image3DGL in a specified context, from a given
+ * GL Texture.
+ *
+ * Wraps clCreateFromGLTexture3D().
+ */
+ Image3DGL(
+ const Context& context,
+ cl_mem_flags flags,
+ GLenum target,
+ GLint miplevel,
+ GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture3D(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image3DGL() : Image3D() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3DGL(const Image3DGL& image) : Image3D(image) { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * See Memory for further details.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Image3DGL(const cl_mem& image) : Image3D(image) { }
+
+ /*! \brief Assignment from Image3DGL - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3DGL& operator = (const Image3DGL& rhs)
+ {
+ if (this != &rhs) {
+ Image3D::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3DGL& operator = (const cl_mem& rhs)
+ {
+ Image3D::operator=(rhs);
+ return *this;
+ }
+};
+#endif // #if !defined(CL_VERSION_1_2)
+
+#if defined(CL_VERSION_1_2)
+/*! \class ImageGL
+ * \brief General image interface for GL interop.
+ *
+ * OpenCL 1.2 folds the 2D and 3D GL texture entry points into the single
+ * clCreateFromGLTexture() call, so one wrapper class covers all GL-sourced
+ * images; the dimensionality is already captured by the GL texture target.
+ */
+class ImageGL : public Image
+{
+public:
+ ImageGL(
+ const Context& context,
+ cl_mem_flags flags,
+ GLenum target,
+ GLint miplevel,
+ GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ ImageGL() : Image() { }
+
+ ImageGL(const ImageGL& image) : Image(image) { }
+
+ __CL_EXPLICIT_CONSTRUCTORS ImageGL(const cl_mem& image) : Image(image) { }
+
+ ImageGL& operator = (const ImageGL& rhs)
+ {
+ if (this != &rhs) {
+ Image::operator=(rhs);
+ }
+ return *this;
+ }
+
+ ImageGL& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+};
+#endif // #if defined(CL_VERSION_1_2)
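+
+/* Usage sketch (illustrative; requires CL_VERSION_1_2 and a GL-sharing
+ * context): wrapping an existing GL 2D texture. `tex` is a hypothetical GLuint
+ * texture name.
+ *
+ *   cl_int err;
+ *   cl::ImageGL clTex(context, CL_MEM_READ_ONLY, GL_TEXTURE_2D, 0, tex, &err);
+ */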
+
+/*! \brief Class interface for cl_sampler.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_sampler as the original. For details, see
+ * clRetainSampler() and clReleaseSampler().
+ *
+ * \see cl_sampler
+ */
+class Sampler : public detail::Wrapper<cl_sampler>
+{
+public:
+ /*! \brief Destructor.
+ *
+ * This calls clReleaseSampler() on the value held by this instance.
+ */
+ ~Sampler() { }
+
+ //! \brief Default constructor - initializes to NULL.
+ Sampler() { }
+
+ /*! \brief Constructs a Sampler in a specified context.
+ *
+ * Wraps clCreateSampler().
+ */
+ Sampler(
+ const Context& context,
+ cl_bool normalized_coords,
+ cl_addressing_mode addressing_mode,
+ cl_filter_mode filter_mode,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateSampler(
+ context(),
+ normalized_coords,
+ addressing_mode,
+ filter_mode,
+ &error);
+
+ detail::errHandler(error, __CREATE_SAMPLER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * This calls clRetainSampler() on the parameter's cl_sampler.
+ */
+ Sampler(const Sampler& sampler) : detail::Wrapper<cl_type>(sampler) { }
+
+ /*! \brief Constructor from cl_sampler - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_sampler
+ * into the new Sampler object.
+ */
+ Sampler(const cl_sampler& sampler) : detail::Wrapper<cl_type>(sampler) { }
+
+ /*! \brief Assignment operator from Sampler.
+ *
+ * This calls clRetainSampler() on the parameter and clReleaseSampler()
+ * on the previous value held by this instance.
+ */
+ Sampler& operator = (const Sampler& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_sampler - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseSampler() on the value previously held by this instance.
+ */
+ Sampler& operator = (const cl_sampler& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetSamplerInfo().
+ template <typename T>
+ cl_int getInfo(cl_sampler_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetSamplerInfo, object_, name, param),
+ __GET_SAMPLER_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetSamplerInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_sampler_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_sampler_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+};
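+
+/* Usage sketch (illustrative): a sampler using normalized coordinates,
+ * clamp-to-edge addressing and linear filtering.
+ *
+ *   cl_int err;
+ *   cl::Sampler sampler(context, CL_TRUE, CL_ADDRESS_CLAMP_TO_EDGE,
+ *                       CL_FILTER_LINEAR, &err);
+ */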
+
+class Program;
+class CommandQueue;
+class Kernel;
+
+//! \brief Class interface for specifying NDRange values.
+class NDRange
+{
+private:
+ size_t<3> sizes_;
+ cl_uint dimensions_;
+
+public:
+ //! \brief Default constructor - resulting range has zero dimensions.
+ NDRange()
+ : dimensions_(0)
+ { }
+
+ //! \brief Constructs one-dimensional range.
+ NDRange(::size_t size0)
+ : dimensions_(1)
+ {
+ sizes_[0] = size0;
+ }
+
+ //! \brief Constructs two-dimensional range.
+ NDRange(::size_t size0, ::size_t size1)
+ : dimensions_(2)
+ {
+ sizes_[0] = size0;
+ sizes_[1] = size1;
+ }
+
+ //! \brief Constructs three-dimensional range.
+ NDRange(::size_t size0, ::size_t size1, ::size_t size2)
+ : dimensions_(3)
+ {
+ sizes_[0] = size0;
+ sizes_[1] = size1;
+ sizes_[2] = size2;
+ }
+
+ /*! \brief Conversion operator to const ::size_t *.
+ *
+ * \returns a pointer to the size of the first dimension.
+ */
+ operator const ::size_t*() const {
+ return (const ::size_t*) sizes_;
+ }
+
+ //! \brief Queries the number of dimensions in the range.
+ ::size_t dimensions() const { return dimensions_; }
+};
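+
+/* Usage sketch (illustrative): NDRange values as typically passed to
+ * CommandQueue::enqueueNDRangeKernel() (declared later in this header).
+ * cl::NullRange (declared just below) may be passed where no offset or local
+ * size is wanted.
+ *
+ *   cl::NDRange global(1024, 768);   // 2D global work size
+ *   cl::NDRange local(16, 16);       // 2D work-group size
+ */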
+
+//! \brief A zero-dimensional range.
+static const NDRange NullRange;
+
+//! \brief Local address wrapper for use with Kernel::setArg
+struct LocalSpaceArg
+{
+ ::size_t size_;
+};
+
+namespace detail {
+
+template <typename T>
+struct KernelArgumentHandler
+{
+ static ::size_t size(const T&) { return sizeof(T); }
+ static T* ptr(T& value) { return &value; }
+};
+
+template <>
+struct KernelArgumentHandler<LocalSpaceArg>
+{
+ static ::size_t size(const LocalSpaceArg& value) { return value.size_; }
+ static void* ptr(LocalSpaceArg&) { return NULL; }
+};
+
+}
+//! \endcond
+
+/*! __local
+ * \brief Helper function for generating LocalSpaceArg objects.
+ * \note Deprecated; use Local() instead.
+ */
+inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED LocalSpaceArg
+__local(::size_t size) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+inline LocalSpaceArg
+__local(::size_t size)
+{
+ LocalSpaceArg ret = { size };
+ return ret;
+}
+
+/*! Local
+ * \brief Helper function for generating LocalSpaceArg objects.
+ */
+inline LocalSpaceArg
+Local(::size_t size)
+{
+ LocalSpaceArg ret = { size };
+ return ret;
+}
+
+//class KernelFunctor;
+
+/*! \brief Class interface for cl_kernel.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_kernel as the original. For details, see
+ * clRetainKernel() and clReleaseKernel().
+ *
+ * \see cl_kernel
+ */
+class Kernel : public detail::Wrapper<cl_kernel>
+{
+public:
+ inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
+
+ /*! \brief Destructor.
+ *
+ * This calls clReleaseKernel() on the value held by this instance.
+ */
+ ~Kernel() { }
+
+ //! \brief Default constructor - initializes to NULL.
+ Kernel() { }
+
+ /*! \brief Copy constructor - performs shallow copy.
+ *
+ * This calls clRetainKernel() on the parameter's cl_kernel.
+ */
+ Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) { }
+
+ /*! \brief Constructor from cl_kernel - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_kernel
+ * into the new Kernel object.
+ */
+ __CL_EXPLICIT_CONSTRUCTORS Kernel(const cl_kernel& kernel) : detail::Wrapper<cl_type>(kernel) { }
+
+ /*! \brief Assignment operator from Kernel.
+ *
+ * This calls clRetainKernel() on the parameter and clReleaseKernel()
+ * on the previous value held by this instance.
+ */
+ Kernel& operator = (const Kernel& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ /*! \brief Assignment operator from cl_kernel - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseKernel() on the value previously held by this instance.
+ */
+ Kernel& operator = (const cl_kernel& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ template <typename T>
+ cl_int getInfo(cl_kernel_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetKernelInfo, object_, name, param),
+ __GET_KERNEL_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+#if defined(CL_VERSION_1_2)
+ template <typename T>
+ cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
+ __GET_KERNEL_ARG_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
+ getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_arg_info, name>::param_type param;
+ cl_int result = getArgInfo(argIndex, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+ template <typename T>
+ cl_int getWorkGroupInfo(
+ const Device& device, cl_kernel_work_group_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetKernelWorkGroupInfo, object_, device(), name, param),
+ __GET_KERNEL_WORK_GROUP_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
+ getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_work_group_info, name>::param_type param;
+ cl_int result = getWorkGroupInfo(device, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ template <typename T>
+ cl_int setArg(cl_uint index, T value)
+ {
+ return detail::errHandler(
+ ::clSetKernelArg(
+ object_,
+ index,
+ detail::KernelArgumentHandler<T>::size(value),
+ detail::KernelArgumentHandler<T>::ptr(value)),
+ __SET_KERNEL_ARGS_ERR);
+ }
+
+ cl_int setArg(cl_uint index, ::size_t size, void* argPtr)
+ {
+ return detail::errHandler(
+ ::clSetKernelArg(object_, index, size, argPtr),
+ __SET_KERNEL_ARGS_ERR);
+ }
+};
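+
+/* Usage sketch (illustrative): setting kernel arguments, including a local
+ * memory allocation via cl::Local(). Assumes `program` contains a kernel named
+ * "vadd" and that `inputBuffer`/`outputBuffer` are existing cl::Buffer objects
+ * (hypothetical names).
+ *
+ *   cl_int err;
+ *   cl::Kernel kernel(program, "vadd", &err);
+ *   kernel.setArg(0, inputBuffer);
+ *   kernel.setArg(1, outputBuffer);
+ *   kernel.setArg(2, cl::Local(256 * sizeof(float)));
+ */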
+
+/*! \class Program
+ * \brief Program interface that implements cl_program.
+ */
+class Program : public detail::Wrapper<cl_program>
+{
+public:
+ typedef VECTOR_CLASS<std::pair<const void*, ::size_t> > Binaries;
+ typedef VECTOR_CLASS<std::pair<const char*, ::size_t> > Sources;
+
+ Program(
+ const STRING_CLASS& source,
+ bool build = false,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const char * strings = source.c_str();
+ const ::size_t length = source.size();
+
+ Context context = Context::getDefault(err);
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)1, &strings, &length, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+ if (error == CL_SUCCESS && build) {
+
+ error = ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+ "",
+ NULL,
+ NULL);
+
+ detail::errHandler(error, __BUILD_PROGRAM_ERR);
+ }
+
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Program(
+ const Context& context,
+ const STRING_CLASS& source,
+ bool build = false,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const char * strings = source.c_str();
+ const ::size_t length = source.size();
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)1, &strings, &length, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+ if (error == CL_SUCCESS && build) {
+
+ error = ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+ "",
+ NULL,
+ NULL);
+
+ detail::errHandler(error, __BUILD_PROGRAM_ERR);
+ }
+
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Program(
+ const Context& context,
+ const Sources& sources,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const ::size_t n = (::size_t)sources.size();
+ ::size_t* lengths = (::size_t*) alloca(n * sizeof(::size_t));
+ const char** strings = (const char**) alloca(n * sizeof(const char*));
+
+ for (::size_t i = 0; i < n; ++i) {
+            strings[i] = sources[i].first;
+            lengths[i] = sources[i].second;
+ }
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)n, strings, lengths, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /**
+ * Construct a program object from a list of devices and a per-device list of binaries.
+ * \param context A valid OpenCL context in which to construct the program.
+ * \param devices A vector of OpenCL device objects for which the program will be created.
+ * \param binaries A vector of pairs of a pointer to a binary object and its length.
+ * \param binaryStatus An optional vector that on completion will be resized to
+ * match the size of binaries and filled with values to specify if each binary
+ * was successfully loaded.
+ * Set to CL_SUCCESS if the binary was successfully loaded.
+ * Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
+ * Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
+ * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
+ * CL_INVALID_CONTEXT if context is not a valid context.
+ * CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
+ * or if any entry in binaries is NULL or has length 0.
+ * CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
+ * CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
+ * CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
+ */
+ Program(
+ const Context& context,
+ const VECTOR_CLASS<Device>& devices,
+ const Binaries& binaries,
+ VECTOR_CLASS<cl_int>* binaryStatus = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const ::size_t numDevices = devices.size();
+
+ // Catch size mismatch early and return
+ if(binaries.size() != numDevices) {
+ error = CL_INVALID_VALUE;
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+        ::size_t* lengths = (::size_t*) alloca(numDevices * sizeof(::size_t));
+        const unsigned char** images = (const unsigned char**) alloca(numDevices * sizeof(const unsigned char*));
+
+        for (::size_t i = 0; i < numDevices; ++i) {
+            images[i] = (const unsigned char*)binaries[i].first;
+            lengths[i] = binaries[i].second;
+        }
+
+ cl_device_id* deviceIDs = (cl_device_id*) alloca(numDevices * sizeof(cl_device_id));
+ for( ::size_t deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ if(binaryStatus) {
+ binaryStatus->resize(numDevices);
+ }
+
+ object_ = ::clCreateProgramWithBinary(
+ context(), (cl_uint) devices.size(),
+ deviceIDs,
+ lengths, images, binaryStatus != NULL
+ ? &binaryStatus->front()
+ : NULL, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+
+#if defined(CL_VERSION_1_2)
+ /**
+     * Create a program from built-in kernels.
+     * \param kernelNames Semicolon-separated list of built-in kernel names.
+ */
+ Program(
+ const Context& context,
+ const VECTOR_CLASS<Device>& devices,
+ const STRING_CLASS& kernelNames,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+
+ ::size_t numDevices = devices.size();
+ cl_device_id* deviceIDs = (cl_device_id*) alloca(numDevices * sizeof(cl_device_id));
+ for( ::size_t deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ object_ = ::clCreateProgramWithBuiltInKernels(
+ context(),
+ (cl_uint) devices.size(),
+ deviceIDs,
+ kernelNames.c_str(),
+ &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+ Program() { }
+
+ Program(const Program& program) : detail::Wrapper<cl_type>(program) { }
+
+ __CL_EXPLICIT_CONSTRUCTORS Program(const cl_program& program) : detail::Wrapper<cl_type>(program) { }
+
+ Program& operator = (const Program& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ Program& operator = (const cl_program& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ cl_int build(
+ const VECTOR_CLASS<Device>& devices,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ ::size_t numDevices = devices.size();
+ cl_device_id* deviceIDs = (cl_device_id*) alloca(numDevices * sizeof(cl_device_id));
+ for( ::size_t deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ return detail::errHandler(
+ ::clBuildProgram(
+ object_,
+ (cl_uint)
+ devices.size(),
+ deviceIDs,
+ options,
+ notifyFptr,
+ data),
+ __BUILD_PROGRAM_ERR);
+ }
+
+ cl_int build(
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ return detail::errHandler(
+ ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+ options,
+ notifyFptr,
+ data),
+ __BUILD_PROGRAM_ERR);
+ }
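+
+ // Usage sketch (non-normative), assuming `program` is a Program created
+ // from source; passes a standard OpenCL build-option string:
+ //
+ // cl_int buildErr = program.build("-cl-fast-relaxed-math");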
+
+#if defined(CL_VERSION_1_2)
+ cl_int compile(
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ return detail::errHandler(
+ ::clCompileProgram(
+ object_,
+ 0,
+ NULL,
+ options,
+ 0,
+ NULL,
+ NULL,
+ notifyFptr,
+ data),
+ __COMPILE_PROGRAM_ERR);
+ }
+#endif
+
+ template <typename T>
+ cl_int getInfo(cl_program_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetProgramInfo, object_, name, param),
+ __GET_PROGRAM_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_program_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_program_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ template <typename T>
+ cl_int getBuildInfo(
+ const Device& device, cl_program_build_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetProgramBuildInfo, object_, device(), name, param),
+ __GET_PROGRAM_BUILD_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_program_build_info, name>::param_type
+ getBuildInfo(const Device& device, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_program_build_info, name>::param_type param;
+ cl_int result = getBuildInfo(device, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ cl_int createKernels(VECTOR_CLASS<Kernel>* kernels)
+ {
+ cl_uint numKernels;
+ cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+ }
+
+ Kernel* value = (Kernel*) alloca(numKernels * sizeof(Kernel));
+ err = ::clCreateKernelsInProgram(
+ object_, numKernels, (cl_kernel*) value, NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+ }
+
+ kernels->assign(&value[0], &value[numKernels]);
+ return CL_SUCCESS;
+ }
+};
+
+#if defined(CL_VERSION_1_2)
+inline Program linkProgram(
+ Program input1,
+ Program input2,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+{
+ cl_int err_local = CL_SUCCESS;
+
+ cl_program programs[2] = { input1(), input2() };
+
+ Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>();
+
+ cl_program prog = ::clLinkProgram(
+ ctx(),
+ 0,
+ NULL,
+ options,
+ 2,
+ programs,
+ notifyFptr,
+ data,
+ &err_local);
+
+ detail::errHandler(err_local, __COMPILE_PROGRAM_ERR);
+ if (err != NULL) {
+ *err = err_local;
+ }
+
+ return Program(prog);
+}
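+
+// Usage sketch (non-normative): progA and progB are assumed to have been
+// compiled (e.g. with Program::compile()) against the same context:
+//
+// cl_int linkErr;
+// cl::Program linked = cl::linkProgram(progA, progB, NULL, NULL, NULL, &linkErr);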
+
+inline Program linkProgram(
+ VECTOR_CLASS<Program> inputPrograms,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+{
+ cl_int err_local = CL_SUCCESS;
+
+ cl_program * programs = (cl_program*) alloca(inputPrograms.size() * sizeof(cl_program));
+
+ if (programs != NULL) {
+ for (unsigned int i = 0; i < inputPrograms.size(); i++) {
+ programs[i] = inputPrograms[i]();
+ }
+ }
+
+ cl_program prog = ::clLinkProgram(
+ Context::getDefault()(),
+ 0,
+ NULL,
+ options,
+ (cl_uint)inputPrograms.size(),
+ programs,
+ notifyFptr,
+ data,
+ &err_local);
+
+ detail::errHandler(err_local, __COMPILE_PROGRAM_ERR);
+ if (err != NULL) {
+ *err = err_local;
+ }
+
+ return Program(prog);
+}
+#endif
+
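+/**
+ * Specialization of Program::getInfo for CL_PROGRAM_BINARIES.
+ * Each returned pointer is allocated here with new[] (one buffer per
+ * program binary) and ownership passes to the caller, who must delete[]
+ * every element; entries whose reported size is zero are returned as NULL.
+ */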
+template<>
+inline VECTOR_CLASS<char *> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
+{
+ VECTOR_CLASS< ::size_t> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
+ VECTOR_CLASS<char *> binaries;
+ for (VECTOR_CLASS< ::size_t>::iterator s = sizes.begin(); s != sizes.end(); ++s)
+ {
+ char *ptr = NULL;
+ if (*s != 0)
+ ptr = new char[*s];
+ binaries.push_back(ptr);
+ }
+
+ cl_int result = getInfo(CL_PROGRAM_BINARIES, &binaries);
+ if (err != NULL) {
+ *err = result;
+ }
+ return binaries;
+}
+
+inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
+{
+ cl_int error;
+
+ object_ = ::clCreateKernel(program(), name, &error);
+ detail::errHandler(error, __CREATE_KERNEL_ERR);
+
+ if (err != NULL) {
+ *err = error;
+ }
+
+}
+
+/*! \class CommandQueue
+ * \brief CommandQueue interface for cl_command_queue.
+ */
+class CommandQueue : public detail::Wrapper<cl_command_queue>
+{
+private:
+ static volatile int default_initialized_;
+ static CommandQueue default_;
+ static volatile cl_int default_error_;
+public:
+ CommandQueue(
+ cl_command_queue_properties properties,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+
+ if (error != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ else {
+ Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+
+ object_ = ::clCreateCommandQueue(
+ context(), device(), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ }
+ /*!
+ * \brief Constructs a CommandQueue for an implementation-defined device in the given context.
+ */
+ explicit CommandQueue(
+ const Context& context,
+ cl_command_queue_properties properties = 0,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ VECTOR_CLASS<cl::Device> devices;
+ error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+
+ if (error != CL_SUCCESS)
+ {
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+ object_ = ::clCreateCommandQueue(context(), devices[0](), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+
+ if (err != NULL) {
+ *err = error;
+ }
+
+ }
+
+ CommandQueue(
+ const Context& context,
+ const Device& device,
+ cl_command_queue_properties properties = 0,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateCommandQueue(
+ context(), device(), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ static CommandQueue getDefault(cl_int * err = NULL)
+ {
+ int state = detail::compare_exchange(
+ &default_initialized_,
+ __DEFAULT_BEING_INITIALIZED, __DEFAULT_NOT_INITIALIZED);
+
+ if (state & __DEFAULT_INITIALIZED) {
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ if (state & __DEFAULT_BEING_INITIALIZED) {
+ // Assume writes will propagate eventually...
+ while(default_initialized_ != __DEFAULT_INITIALIZED) {
+ detail::fence();
+ }
+
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ cl_int error;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+
+ if (error != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ else {
+ Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+
+ default_ = CommandQueue(context, device, 0, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ detail::fence();
+
+ default_error_ = error;
+ // Assume writes will propagate eventually...
+ default_initialized_ = __DEFAULT_INITIALIZED;
+
+ detail::fence();
+
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+
+ }
+
+ CommandQueue() { }
+
+ CommandQueue(const CommandQueue& commandQueue) : detail::Wrapper<cl_type>(commandQueue) { }
+
+ CommandQueue(const cl_command_queue& commandQueue) : detail::Wrapper<cl_type>(commandQueue) { }
+
+ CommandQueue& operator = (const CommandQueue& rhs)
+ {
+ if (this != &rhs) {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ }
+ return *this;
+ }
+
+ CommandQueue& operator = (const cl_command_queue& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ template <typename T>
+ cl_int getInfo(cl_command_queue_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetCommandQueueInfo, object_, name, param),
+ __GET_COMMAND_QUEUE_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_command_queue_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_command_queue_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ cl_int enqueueReadBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ ::size_t offset,
+ ::size_t size,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadBuffer(
+ object_, buffer(), blocking, offset, size,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueWriteBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ ::size_t offset,
+ ::size_t size,
+ const void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteBuffer(
+ object_, buffer(), blocking, offset, size,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyBuffer(
+ const Buffer& src,
+ const Buffer& dst,
+ ::size_t src_offset,
+ ::size_t dst_offset,
+ ::size_t size,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBuffer(
+ object_, src(), dst(), src_offset, dst_offset, size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQEUE_COPY_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReadBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const size_t<3>& buffer_offset,
+ const size_t<3>& host_offset,
+ const size_t<3>& region,
+ ::size_t buffer_row_pitch,
+ ::size_t buffer_slice_pitch,
+ ::size_t host_row_pitch,
+ ::size_t host_slice_pitch,
+ void *ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadBufferRect(
+ object_,
+ buffer(),
+ blocking,
+ (const ::size_t *)buffer_offset,
+ (const ::size_t *)host_offset,
+ (const ::size_t *)region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
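+
+ // Usage sketch (non-normative): blocking read of a 16x16-float region from
+ // the top-left of a 64-float-wide buffer into tightly packed host memory;
+ // `queue`, `buffer` and `host` are placeholders. cl::size_t<3>
+ // default-constructs to {0,0,0}, and host pitches of 0 mean tightly packed.
+ //
+ // cl::size_t<3> buffer_origin, host_origin, region;
+ // region[0] = 16 * sizeof(float); region[1] = 16; region[2] = 1;
+ // queue.enqueueReadBufferRect(buffer, CL_TRUE, buffer_origin, host_origin,
+ // region, 64 * sizeof(float), 0, 0, 0, host);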
+
+ cl_int enqueueWriteBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const size_t<3>& buffer_offset,
+ const size_t<3>& host_offset,
+ const size_t<3>& region,
+ ::size_t buffer_row_pitch,
+ ::size_t buffer_slice_pitch,
+ ::size_t host_row_pitch,
+ ::size_t host_slice_pitch,
+ void *ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteBufferRect(
+ object_,
+ buffer(),
+ blocking,
+ (const ::size_t *)buffer_offset,
+ (const ::size_t *)host_offset,
+ (const ::size_t *)region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyBufferRect(
+ const Buffer& src,
+ const Buffer& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ ::size_t src_row_pitch,
+ ::size_t src_slice_pitch,
+ ::size_t dst_row_pitch,
+ ::size_t dst_slice_pitch,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBufferRect(
+ object_,
+ src(),
+ dst(),
+ (const ::size_t *)src_origin,
+ (const ::size_t *)dst_origin,
+ (const ::size_t *)region,
+ src_row_pitch,
+ src_slice_pitch,
+ dst_row_pitch,
+ dst_slice_pitch,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQEUE_COPY_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if defined(CL_VERSION_1_2)
+ /**
+ * Enqueue a command to fill a buffer object with a pattern
+ * of a given size. The pattern is specified as a vector.
+ * \tparam PatternType The datatype of the pattern field.
+ * The pattern type must be an accepted OpenCL data type.
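+ *
+ * \par Example
+ * A sketch; \c queue and \c buf are placeholders for a valid CommandQueue
+ * and Buffer:
+ * \code
+ * cl_float zero = 0.0f;
+ * queue.enqueueFillBuffer(buf, zero, 0, 1024 * sizeof(cl_float));
+ * \endcode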
+ */
+ template<typename PatternType>
+ cl_int enqueueFillBuffer(
+ const Buffer& buffer,
+ PatternType pattern,
+ ::size_t offset,
+ ::size_t size,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillBuffer(
+ object_,
+ buffer(),
+ static_cast<void*>(&pattern),
+ sizeof(PatternType),
+ offset,
+ size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+ cl_int enqueueReadImage(
+ const Image& image,
+ cl_bool blocking,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ ::size_t row_pitch,
+ ::size_t slice_pitch,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadImage(
+ object_, image(), blocking, (const ::size_t *) origin,
+ (const ::size_t *) region, row_pitch, slice_pitch, ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueWriteImage(
+ const Image& image,
+ cl_bool blocking,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ ::size_t row_pitch,
+ ::size_t slice_pitch,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteImage(
+ object_, image(), blocking, (const ::size_t *) origin,
+ (const ::size_t *) region, row_pitch, slice_pitch, ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyImage(
+ const Image& src,
+ const Image& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyImage(
+ object_, src(), dst(), (const ::size_t *) src_origin,
+ (const ::size_t *)dst_origin, (const ::size_t *) region,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if defined(CL_VERSION_1_2)
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA floating-point color value if
+ * the image channel data type is not an unnormalized signed or
+ * unsigned data type.
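+ *
+ * \par Example
+ * A sketch filling a whole 2-D image with opaque red; \c queue, \c image,
+ * \c width and \c height are placeholders:
+ * \code
+ * cl_float4 red = {{ 1.0f, 0.0f, 0.0f, 1.0f }};
+ * cl::size_t<3> origin; // default-constructed to {0,0,0}
+ * cl::size_t<3> region;
+ * region[0] = width; region[1] = height; region[2] = 1;
+ * queue.enqueueFillImage(image, red, origin, region);
+ * \endcode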
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_float4 fillColor,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ (const ::size_t *) origin,
+ (const ::size_t *) region,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA signed integer color value if
+ * the image channel data type is an unnormalized signed integer
+ * type.
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_int4 fillColor,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ (const ::size_t *) origin,
+ (const ::size_t *) region,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA unsigned integer color value if
+ * the image channel data type is an unnormalized unsigned integer
+ * type.
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_uint4 fillColor,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ (const ::size_t *) origin,
+ (const ::size_t *) region,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+ cl_int enqueueCopyImageToBuffer(
+ const Image& src,
+ const Buffer& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& region,
+ ::size_t dst_offset,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyImageToBuffer(
+ object_, src(), dst(), (const ::size_t *) src_origin,
+ (const ::size_t *) region, dst_offset,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyBufferToImage(
+ const Buffer& src,
+ const Image& dst,
+ ::size_t src_offset,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBufferToImage(
+ object_, src(), dst(), src_offset,
+ (const ::size_t *) dst_origin, (const ::size_t *) region,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ void* enqueueMapBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ cl_map_flags flags,
+ ::size_t offset,
+ ::size_t size,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL) const
+ {
+ cl_int error;
+ void * result = ::clEnqueueMapBuffer(
+ object_, buffer(), blocking, flags, offset, size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (cl_event*) event,
+ &error);
+
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return result;
+ }
+
+ void* enqueueMapImage(
+ const Image& image,
+ cl_bool blocking,
+ cl_map_flags flags,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ ::size_t * row_pitch,
+ ::size_t * slice_pitch,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL) const
+ {
+ cl_int error;
+ void * result = ::clEnqueueMapImage(
+ object_, image(), blocking, flags,
+ (const ::size_t *) origin, (const ::size_t *) region,
+ row_pitch, slice_pitch,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (cl_event*) event,
+ &error);
+
+ detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return result;
+ }
+
+ cl_int enqueueUnmapMemObject(
+ const Memory& memory,
+ void* mapped_ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueUnmapMemObject(
+ object_, memory(), mapped_ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if defined(CL_VERSION_1_2)
+ /**
+ * Enqueues a marker command which waits for either a list of events to complete,
+ * or all previously enqueued commands to complete.
+ *
+ * Enqueues a marker command which waits for either a list of events to complete,
+ * or if the list is empty it waits for all commands previously enqueued in command_queue
+ * to complete before it completes. This command returns an event which can be waited on,
+ * i.e. this event can be waited on to ensure that all events either in the event_wait_list
+ * or all previously enqueued commands, queued before this command to command_queue,
+ * have completed.
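+ *
+ * \par Example
+ * A sketch (\c queue is a placeholder) waiting for everything enqueued so
+ * far to complete:
+ * \code
+ * cl::Event marker;
+ * queue.enqueueMarkerWithWaitList(NULL, &marker);
+ * marker.wait();
+ * \endcode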
+ */
+ cl_int enqueueMarkerWithWaitList(
+ const VECTOR_CLASS<Event> *events = NULL,
+ Event *event = NULL)
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueMarkerWithWaitList(
+ object_,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MARKER_WAIT_LIST_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * A synchronization point that enqueues a barrier operation.
+ *
+ * Enqueues a barrier command which waits for either a list of events to complete,
+ * or if the list is empty it waits for all commands previously enqueued in command_queue
+ * to complete before it completes. This command blocks command execution, that is, any
+ * following commands enqueued after it do not execute until it completes. This command
+ * returns an event which can be waited on, i.e. this event can be waited on to ensure that
+ * all events either in the event_wait_list or all previously enqueued commands, queued
+ * before this command to command_queue, have completed.
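+ *
+ * \par Example
+ * A sketch ordering two batches of commands without host-side
+ * synchronization:
+ * \code
+ * // ... enqueue first batch ...
+ * queue.enqueueBarrierWithWaitList(); // later commands wait for the batch
+ * // ... enqueue second batch ...
+ * \endcode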
+ */
+ cl_int enqueueBarrierWithWaitList(
+ const VECTOR_CLASS<Event> *events = NULL,
+ Event *event = NULL)
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueBarrierWithWaitList(
+ object_,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_BARRIER_WAIT_LIST_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueues a command to indicate with which device a set of memory objects
+ * should be associated.
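+ *
+ * \par Example
+ * A sketch migrating one buffer's backing store to the host; \c queue and
+ * \c buf are placeholders:
+ * \code
+ * VECTOR_CLASS<cl::Memory> objs;
+ * objs.push_back(buf);
+ * queue.enqueueMigrateMemObjects(objs, CL_MIGRATE_MEM_OBJECT_HOST);
+ * \endcode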
+ */
+ cl_int enqueueMigrateMemObjects(
+ const VECTOR_CLASS<Memory> &memObjects,
+ cl_mem_migration_flags flags,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL
+ )
+ {
+ cl_event tmp;
+
+ cl_mem* localMemObjects = static_cast<cl_mem*>(alloca(memObjects.size() * sizeof(cl_mem)));
+ for( ::size_t i = 0; i < memObjects.size(); ++i ) {
+ localMemObjects[i] = memObjects[i]();
+ }
+
+ cl_int err = detail::errHandler(
+ ::clEnqueueMigrateMemObjects(
+ object_,
+ (cl_uint)memObjects.size(),
+ static_cast<const cl_mem*>(localMemObjects),
+ flags,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if defined(CL_VERSION_1_2)
+
+ cl_int enqueueNDRangeKernel(
+ const Kernel& kernel,
+ const NDRange& offset,
+ const NDRange& global,
+ const NDRange& local = NullRange,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueNDRangeKernel(
+ object_, kernel(), (cl_uint) global.dimensions(),
+ offset.dimensions() != 0 ? (const ::size_t*) offset : NULL,
+ (const ::size_t*) global,
+ local.dimensions() != 0 ? (const ::size_t*) local : NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_NDRANGE_KERNEL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
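+
+ // Usage sketch (non-normative): launch `kernel` over a 2-D 512x512 global
+ // range with the work-group size left to the implementation:
+ //
+ // queue.enqueueNDRangeKernel(kernel, cl::NullRange,
+ // cl::NDRange(512, 512), cl::NullRange);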
+
+ cl_int enqueueTask(
+ const Kernel& kernel,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueTask(
+ object_, kernel(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_TASK_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueNativeKernel(
+ void (CL_CALLBACK *userFptr)(void *),
+ std::pair<void*, ::size_t> args,
+ const VECTOR_CLASS<Memory>* mem_objects = NULL,
+ const VECTOR_CLASS<const void*>* mem_locs = NULL,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_mem * mems = (mem_objects != NULL && mem_objects->size() > 0)
+ ? (cl_mem*) alloca(mem_objects->size() * sizeof(cl_mem))
+ : NULL;
+
+ if (mems != NULL) {
+ for (unsigned int i = 0; i < mem_objects->size(); i++) {
+ mems[i] = ((*mem_objects)[i])();
+ }
+ }
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueNativeKernel(
+ object_, userFptr, args.first, args.second,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ mems,
+ (mem_locs != NULL) ? (const void **) &mem_locs->front() : NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_NATIVE_KERNEL);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+/**
+ * OpenCL 1.1 APIs, deprecated as of OpenCL 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ return detail::errHandler(
+ ::clEnqueueMarker(object_, (cl_event*) event),
+ __ENQUEUE_MARKER_ERR);
+ }
+
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueWaitForEvents(const VECTOR_CLASS<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ return detail::errHandler(
+ ::clEnqueueWaitForEvents(
+ object_,
+ (cl_uint) events.size(),
+ (const cl_event*) &events.front()),
+ __ENQUEUE_WAIT_FOR_EVENTS_ERR);
+ }
+#endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+
+ cl_int enqueueAcquireGLObjects(
+ const VECTOR_CLASS<Memory>* mem_objects = NULL,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueAcquireGLObjects(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_ACQUIRE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReleaseGLObjects(
+ const VECTOR_CLASS<Memory>* mem_objects = NULL,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReleaseGLObjects(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_RELEASE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if defined (USE_DX_INTEROP)
+typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
+ cl_command_queue command_queue, cl_uint num_objects,
+ const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
+ const cl_event* event_wait_list, cl_event* event);
+typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
+ cl_command_queue command_queue, cl_uint num_objects,
+ const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
+ const cl_event* event_wait_list, cl_event* event);
+
+ cl_int enqueueAcquireD3D10Objects(
+ const VECTOR_CLASS<Memory>* mem_objects = NULL,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
+#if defined(CL_VERSION_1_2)
+ cl_context context = getInfo<CL_QUEUE_CONTEXT>();
+ cl::Device device(getInfo<CL_QUEUE_DEVICE>());
+ cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
+ __INIT_CL_EXT_FCN_PTR_PLATFORM(platform, clEnqueueAcquireD3D10ObjectsKHR);
+#endif
+#if defined(CL_VERSION_1_1)
+ __INIT_CL_EXT_FCN_PTR(clEnqueueAcquireD3D10ObjectsKHR);
+#endif
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ pfn_clEnqueueAcquireD3D10ObjectsKHR(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_ACQUIRE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReleaseD3D10Objects(
+ const VECTOR_CLASS<Memory>* mem_objects = NULL,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
+#if defined(CL_VERSION_1_2)
+ cl_context context = getInfo<CL_QUEUE_CONTEXT>();
+ cl::Device device(getInfo<CL_QUEUE_DEVICE>());
+ cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
+ __INIT_CL_EXT_FCN_PTR_PLATFORM(platform, clEnqueueReleaseD3D10ObjectsKHR);
+#endif // #if defined(CL_VERSION_1_2)
+#if defined(CL_VERSION_1_1)
+ __INIT_CL_EXT_FCN_PTR(clEnqueueReleaseD3D10ObjectsKHR);
+#endif // #if defined(CL_VERSION_1_1)
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ pfn_clEnqueueReleaseD3D10ObjectsKHR(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_RELEASE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif
+
+/**
+ * OpenCL 1.1 APIs, deprecated as of OpenCL 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ return detail::errHandler(
+ ::clEnqueueBarrier(object_),
+ __ENQUEUE_BARRIER_ERR);
+ }
+#endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) || (defined(CL_VERSION_1_1) && !defined(CL_VERSION_1_2))
+
+ cl_int flush() const
+ {
+ return detail::errHandler(::clFlush(object_), __FLUSH_ERR);
+ }
+
+ cl_int finish() const
+ {
+ return detail::errHandler(::clFinish(object_), __FINISH_ERR);
+ }
+};
+
+#ifdef _WIN32
+__declspec(selectany) volatile int CommandQueue::default_initialized_ = __DEFAULT_NOT_INITIALIZED;
+__declspec(selectany) CommandQueue CommandQueue::default_;
+__declspec(selectany) volatile cl_int CommandQueue::default_error_ = CL_SUCCESS;
+#else
+__attribute__((weak)) volatile int CommandQueue::default_initialized_ = __DEFAULT_NOT_INITIALIZED;
+__attribute__((weak)) CommandQueue CommandQueue::default_;
+__attribute__((weak)) volatile cl_int CommandQueue::default_error_ = CL_SUCCESS;
+#endif
+
+template< typename IteratorType >
+Buffer::Buffer(
+ const Context &context,
+ IteratorType startIterator,
+ IteratorType endIterator,
+ bool readOnly,
+ bool useHostPtr,
+ cl_int* err)
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ cl_mem_flags flags = 0;
+ if( readOnly ) {
+ flags |= CL_MEM_READ_ONLY;
+ }
+ else {
+ flags |= CL_MEM_READ_WRITE;
+ }
+ if( useHostPtr ) {
+ flags |= CL_MEM_USE_HOST_PTR;
+ }
+
+ ::size_t size = sizeof(DataType)*(endIterator - startIterator);
+
+ if( useHostPtr ) {
+ object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
+ } else {
+ object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
+ }
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ if( !useHostPtr ) {
+ CommandQueue queue(context, 0, &error);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ error = cl::copy(queue, startIterator, endIterator, *this);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+}
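+
+// Usage sketch (non-normative): build a read-only device buffer initialized
+// from host data; `context` is a placeholder, and the constructor enqueues
+// the copy on a temporary queue because useHostPtr is false:
+//
+// std::vector<int> data(256, 1);
+// cl_int err;
+// cl::Buffer buf(context, data.begin(), data.end(), true, false, &err);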
+
+inline cl_int enqueueReadBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ ::size_t offset,
+ ::size_t size,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
+}
+
+inline cl_int enqueueWriteBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ ::size_t offset,
+ ::size_t size,
+ const void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
+}
+
+inline void* enqueueMapBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ cl_map_flags flags,
+ ::size_t offset,
+ ::size_t size,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ void * result = ::clEnqueueMapBuffer(
+ queue(), buffer(), blocking, flags, offset, size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (cl_event*) event,
+ &error);
+
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return result;
+}
+
+inline cl_int enqueueUnmapMemObject(
+ const Memory& memory,
+ void* mapped_ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueUnmapMemObject(
+ queue(), memory(), mapped_ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+}
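+
+// Usage sketch (non-normative): map a buffer on the default queue, read
+// through the mapping, then unmap; `buf` and `size` are placeholders:
+//
+// cl_int err;
+// void* p = cl::enqueueMapBuffer(buf, CL_TRUE, CL_MAP_READ, 0, size,
+// NULL, NULL, &err);
+// /* ... read through p ... */
+// cl::enqueueUnmapMemObject(buf, p);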
+
+inline cl_int enqueueCopyBuffer(
+ const Buffer& src,
+ const Buffer& dst,
+ ::size_t src_offset,
+ ::size_t dst_offset,
+ ::size_t size,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Host to Device.
+ * Uses default command queue.
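+ *
+ * \par Example
+ * A sketch; \c context is a placeholder and a default command queue must be
+ * obtainable:
+ * \code
+ * std::vector<float> host(1024, 0.0f);
+ * cl::Buffer buf(context, CL_MEM_READ_WRITE, host.size() * sizeof(float));
+ * cl_int e = cl::copy(host.begin(), host.end(), buf);
+ * \endcode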
+ */
+template< typename IteratorType >
+inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS)
+ return error;
+
+ return cl::copy(queue, startIterator, endIterator, buffer);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Device to Host.
+ * Uses default command queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS)
+ return error;
+
+ return cl::copy(queue, buffer, startIterator, endIterator);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Host to Device.
+ * Uses specified queue.
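+ * Implemented by mapping the buffer for writing, copying the host range
+ * into the mapped region with std::copy, then unmapping and waiting on the
+ * unmap event.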
+ */
+template< typename IteratorType >
+inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ ::size_t length = endIterator-startIterator;
+ ::size_t byteLength = length*sizeof(DataType);
+
+ DataType *pointer =
+ static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
+ // if exceptions enabled, enqueueMapBuffer will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+#if defined(_MSC_VER)
+ std::copy(
+ startIterator,
+ endIterator,
+ stdext::checked_array_iterator<DataType*>(
+ pointer, length));
+#else
+ std::copy(startIterator, endIterator, pointer);
+#endif
+ Event endEvent;
+ error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
+ // if exceptions enabled, enqueueUnmapMemObject will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ endEvent.wait();
+ return CL_SUCCESS;
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Device to Host.
+ * Uses specified queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ ::size_t length = endIterator-startIterator;
+ ::size_t byteLength = length*sizeof(DataType);
+
+ DataType *pointer =
+ static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
+ // if exceptions enabled, enqueueMapBuffer will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ std::copy(pointer, pointer + length, startIterator);
+ Event endEvent;
+ error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
+ // if exceptions enabled, enqueueUnmapMemObject will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ endEvent.wait();
+ return CL_SUCCESS;
+}
+
+#if defined(CL_VERSION_1_1)
+inline cl_int enqueueReadBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const size_t<3>& buffer_offset,
+ const size_t<3>& host_offset,
+ const size_t<3>& region,
+ ::size_t buffer_row_pitch,
+ ::size_t buffer_slice_pitch,
+ ::size_t host_row_pitch,
+ ::size_t host_slice_pitch,
+ void *ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadBufferRect(
+ buffer,
+ blocking,
+ buffer_offset,
+ host_offset,
+ region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueWriteBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const size_t<3>& buffer_offset,
+ const size_t<3>& host_offset,
+ const size_t<3>& region,
+ ::size_t buffer_row_pitch,
+ ::size_t buffer_slice_pitch,
+ ::size_t host_row_pitch,
+ ::size_t host_slice_pitch,
+ void *ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteBufferRect(
+ buffer,
+ blocking,
+ buffer_offset,
+ host_offset,
+ region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyBufferRect(
+ const Buffer& src,
+ const Buffer& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ ::size_t src_row_pitch,
+ ::size_t src_slice_pitch,
+ ::size_t dst_row_pitch,
+ ::size_t dst_slice_pitch,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBufferRect(
+ src,
+ dst,
+ src_origin,
+ dst_origin,
+ region,
+ src_row_pitch,
+ src_slice_pitch,
+ dst_row_pitch,
+ dst_slice_pitch,
+ events,
+ event);
+}
+#endif
+
+inline cl_int enqueueReadImage(
+ const Image& image,
+ cl_bool blocking,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ ::size_t row_pitch,
+ ::size_t slice_pitch,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadImage(
+ image,
+ blocking,
+ origin,
+ region,
+ row_pitch,
+ slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueWriteImage(
+ const Image& image,
+ cl_bool blocking,
+ const size_t<3>& origin,
+ const size_t<3>& region,
+ ::size_t row_pitch,
+ ::size_t slice_pitch,
+ void* ptr,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteImage(
+ image,
+ blocking,
+ origin,
+ region,
+ row_pitch,
+ slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyImage(
+ const Image& src,
+ const Image& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyImage(
+ src,
+ dst,
+ src_origin,
+ dst_origin,
+ region,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyImageToBuffer(
+ const Image& src,
+ const Buffer& dst,
+ const size_t<3>& src_origin,
+ const size_t<3>& region,
+ ::size_t dst_offset,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyImageToBuffer(
+ src,
+ dst,
+ src_origin,
+ region,
+ dst_offset,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyBufferToImage(
+ const Buffer& src,
+ const Image& dst,
+ ::size_t src_offset,
+ const size_t<3>& dst_origin,
+ const size_t<3>& region,
+ const VECTOR_CLASS<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBufferToImage(
+ src,
+ dst,
+ src_offset,
+ dst_origin,
+ region,
+ events,
+ event);
+}
+
+inline cl_int flush(void)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.flush();
+}
+
+inline cl_int finish(void)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.finish();
+}
+
+// Kernel Functor support
+// New interface as of September 2011
+// Requires C++11 std::function support (TR1's std::tr1::function is not supported).
+// Known to work with Visual Studio 2010 and GCC 4.2 onwards.
+
+struct EnqueueArgs
+{
+ CommandQueue queue_;
+ const NDRange offset_;
+ const NDRange global_;
+ const NDRange local_;
+ VECTOR_CLASS<Event> events_;
+
+ EnqueueArgs(NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+
+ }
+
+ EnqueueArgs(NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(Event e, NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(Event e, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(const VECTOR_CLASS<Event> &events, NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(const VECTOR_CLASS<Event> &events, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(const VECTOR_CLASS<Event> &events, NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, const VECTOR_CLASS<Event> &events, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, const VECTOR_CLASS<Event> &events, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, const VECTOR_CLASS<Event> &events, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+};
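+
+// Usage sketch (non-normative): describe a launch on `queue` over 1024
+// work-items with work-groups of 64 and no offset:
+//
+// cl::EnqueueArgs args(queue, cl::NDRange(1024), cl::NDRange(64));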
+
+namespace detail {
+
+class NullType {};
+
+template<int index, typename T0>
+struct SetArg
+{
+ static void set (Kernel kernel, T0 arg)
+ {
+ kernel.setArg(index, arg);
+ }
+};
+
+template<int index>
+struct SetArg<index, NullType>
+{
+ static void set (Kernel, NullType)
+ {
+ }
+};
+
+template <
+ typename T0, typename T1, typename T2, typename T3,
+ typename T4, typename T5, typename T6, typename T7,
+ typename T8, typename T9, typename T10, typename T11,
+ typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19,
+ typename T20, typename T21, typename T22, typename T23,
+ typename T24, typename T25, typename T26, typename T27,
+ typename T28, typename T29, typename T30, typename T31
+>
+class KernelFunctorGlobal
+{
+private:
+ Kernel kernel_;
+
+public:
+ KernelFunctorGlobal(
+ Kernel kernel) :
+ kernel_(kernel)
+ {}
+
+ KernelFunctorGlobal(
+ const Program& program,
+ const STRING_CLASS name,
+ cl_int * err = NULL) :
+ kernel_(program, name.c_str(), err)
+ {}
+
+ Event operator() (
+ const EnqueueArgs& args,
+ T0 t0,
+ T1 t1 = NullType(),
+ T2 t2 = NullType(),
+ T3 t3 = NullType(),
+ T4 t4 = NullType(),
+ T5 t5 = NullType(),
+ T6 t6 = NullType(),
+ T7 t7 = NullType(),
+ T8 t8 = NullType(),
+ T9 t9 = NullType(),
+ T10 t10 = NullType(),
+ T11 t11 = NullType(),
+ T12 t12 = NullType(),
+ T13 t13 = NullType(),
+ T14 t14 = NullType(),
+ T15 t15 = NullType(),
+ T16 t16 = NullType(),
+ T17 t17 = NullType(),
+ T18 t18 = NullType(),
+ T19 t19 = NullType(),
+ T20 t20 = NullType(),
+ T21 t21 = NullType(),
+ T22 t22 = NullType(),
+ T23 t23 = NullType(),
+ T24 t24 = NullType(),
+ T25 t25 = NullType(),
+ T26 t26 = NullType(),
+ T27 t27 = NullType(),
+ T28 t28 = NullType(),
+ T29 t29 = NullType(),
+ T30 t30 = NullType(),
+ T31 t31 = NullType()
+ )
+ {
+ Event event;
+ SetArg<0, T0>::set(kernel_, t0);
+ SetArg<1, T1>::set(kernel_, t1);
+ SetArg<2, T2>::set(kernel_, t2);
+ SetArg<3, T3>::set(kernel_, t3);
+ SetArg<4, T4>::set(kernel_, t4);
+ SetArg<5, T5>::set(kernel_, t5);
+ SetArg<6, T6>::set(kernel_, t6);
+ SetArg<7, T7>::set(kernel_, t7);
+ SetArg<8, T8>::set(kernel_, t8);
+ SetArg<9, T9>::set(kernel_, t9);
+ SetArg<10, T10>::set(kernel_, t10);
+ SetArg<11, T11>::set(kernel_, t11);
+ SetArg<12, T12>::set(kernel_, t12);
+ SetArg<13, T13>::set(kernel_, t13);
+ SetArg<14, T14>::set(kernel_, t14);
+ SetArg<15, T15>::set(kernel_, t15);
+ SetArg<16, T16>::set(kernel_, t16);
+ SetArg<17, T17>::set(kernel_, t17);
+ SetArg<18, T18>::set(kernel_, t18);
+ SetArg<19, T19>::set(kernel_, t19);
+ SetArg<20, T20>::set(kernel_, t20);
+ SetArg<21, T21>::set(kernel_, t21);
+ SetArg<22, T22>::set(kernel_, t22);
+ SetArg<23, T23>::set(kernel_, t23);
+ SetArg<24, T24>::set(kernel_, t24);
+ SetArg<25, T25>::set(kernel_, t25);
+ SetArg<26, T26>::set(kernel_, t26);
+ SetArg<27, T27>::set(kernel_, t27);
+ SetArg<28, T28>::set(kernel_, t28);
+ SetArg<29, T29>::set(kernel_, t29);
+ SetArg<30, T30>::set(kernel_, t30);
+ SetArg<31, T31>::set(kernel_, t31);
+
+ args.queue_.enqueueNDRangeKernel(
+ kernel_,
+ args.offset_,
+ args.global_,
+ args.local_,
+ &args.events_,
+ &event);
+
+ return event;
+ }
+
+};
+
+//------------------------------------------------------------------------------------------------------
+
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26,
+ typename T27,
+ typename T28,
+ typename T29,
+ typename T30,
+ typename T31>
+struct functionImplementation_
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ T30,
+ T31> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 32))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ T30,
+ T31);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26,
+ T27 arg27,
+ T28 arg28,
+ T29 arg29,
+ T30 arg30,
+ T31 arg31)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26,
+ arg27,
+ arg28,
+ arg29,
+ arg30,
+ arg31);
+ }
+
+
+};
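+
+// Each partial specialization that follows pins the unused trailing type
+// parameters to NullType, so naming N argument types selects the N-argument
+// functor variant.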
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26,
+ typename T27,
+ typename T28,
+ typename T29,
+ typename T30>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ T30,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ T30,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 31))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ T30);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26,
+ T27 arg27,
+ T28 arg28,
+ T29 arg29,
+ T30 arg30)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26,
+ arg27,
+ arg28,
+ arg29,
+ arg30);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26,
+ typename T27,
+ typename T28,
+ typename T29>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 30))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ T29);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26,
+ T27 arg27,
+ T28 arg28,
+ T29 arg29)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26,
+ arg27,
+ arg28,
+ arg29);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26,
+ typename T27,
+ typename T28>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 29))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ T28);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26,
+ T27 arg27,
+ T28 arg28)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26,
+ arg27,
+ arg28);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26,
+ typename T27>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 28))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ T27);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26,
+ T27 arg27)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26,
+ arg27);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25,
+ typename T26>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 27))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ T26);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25,
+ T26 arg26)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25,
+ arg26);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24,
+ typename T25>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 26))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ T25);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24,
+ T25 arg25)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24,
+ arg25);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23,
+ typename T24>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 25))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ T24);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23,
+ T24 arg24)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23,
+ arg24);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22,
+ typename T23>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 24))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ T23);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22,
+ T23 arg23)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22,
+ arg23);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21,
+ typename T22>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 23))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ T22);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21,
+ T22 arg22)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21,
+ arg22);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20,
+ typename T21>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 22))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ T21);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20,
+ T21 arg21)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20,
+ arg21);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19,
+ typename T20>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 21))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ T20);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19,
+ T20 arg20)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19,
+ arg20);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18,
+ typename T19>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 20))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ T19);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18,
+ T19 arg19)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18,
+ arg19);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17,
+ typename T18>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 19))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ T18);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17,
+ T18 arg18)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17,
+ arg18);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16,
+ typename T17>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 18))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ T17);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16,
+ T17 arg17)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16,
+ arg17);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15,
+ typename T16>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 17))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ T16);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15,
+ T16 arg16)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15,
+ arg16);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14,
+ typename T15>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 16))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ T15);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14,
+ T15 arg15)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14,
+ arg15);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13,
+ typename T14>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 15))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ T14);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13,
+ T14 arg14)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13,
+ arg14);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12,
+ typename T13>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 14))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ T13);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12,
+ T13 arg13)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12,
+ arg13);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11,
+ typename T12>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 13))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ T12);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11,
+ T12 arg12)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11,
+ arg12);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10,
+ typename T11>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 12))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ T11);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10,
+ T11 arg11)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10,
+ arg11);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9,
+ typename T10>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 11))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ T10);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9,
+ T10 arg10)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9,
+ arg10);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8,
+ typename T9>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 10))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ T9);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8,
+ T9 arg9)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8,
+ arg9);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7,
+ typename T8>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 9))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ T8);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7,
+ T8 arg8)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ arg8);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6,
+ typename T7>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 8))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ T7);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6,
+ T7 arg7)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5,
+ typename T6>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 7))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ T6);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5,
+ T6 arg6)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 6))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ T5);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4,
+ T5 arg5)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ T4,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 5))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3,
+ T4);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3,
+ T4 arg4)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2,
+ typename T3>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ T3,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ T3,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 4))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2,
+ T3);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2,
+ T3 arg3)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2,
+ arg3);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1,
+ typename T2>
+struct functionImplementation_
+< T0,
+ T1,
+ T2,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ T2,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 3))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1,
+ T2);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1,
+ T2 arg2)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1,
+ arg2);
+ }
+
+
+};
+
+template<
+ typename T0,
+ typename T1>
+struct functionImplementation_
+< T0,
+ T1,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ T1,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 2))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0,
+ T1);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0,
+ T1 arg1)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0,
+ arg1);
+ }
+
+
+};
+
+template<
+ typename T0>
+struct functionImplementation_
+< T0,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType>
+{
+ typedef detail::KernelFunctorGlobal<
+ T0,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType,
+ NullType> FunctorType;
+
+ FunctorType functor_;
+
+ functionImplementation_(const FunctorType &functor) :
+ functor_(functor)
+ {
+
+ #if (defined(_WIN32) && defined(_VARIADIC_MAX) && (_VARIADIC_MAX < 1))
+ // Fail variadic expansion for dev11
+ static_assert(0, "Visual Studio has a hard limit of argument count for a std::function expansion. Please define _VARIADIC_MAX to be 10. If you need more arguments than that VC12 and below cannot support it.");
+ #endif
+
+ }
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ T0);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ T0 arg0)
+ {
+ return functor_(
+ enqueueArgs,
+ arg0);
+ }
+
+
+};
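+
+/* The specializations above emulate variadic templates by pattern-matching
+ * the trailing NullType parameters: each arity from 1 to 32 arguments gets
+ * its own functionImplementation_ with a matching operator(). As a sketch,
+ * functionImplementation_<int, float, NullType, ..., NullType> exposes
+ * Event operator()(const EnqueueArgs&, int, float).
+ */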
+
+
+
+
+
+} // namespace detail
+
+//----------------------------------------------------------------------------------------------
+
+template <
+ typename T0, typename T1 = detail::NullType, typename T2 = detail::NullType,
+ typename T3 = detail::NullType, typename T4 = detail::NullType,
+ typename T5 = detail::NullType, typename T6 = detail::NullType,
+ typename T7 = detail::NullType, typename T8 = detail::NullType,
+ typename T9 = detail::NullType, typename T10 = detail::NullType,
+ typename T11 = detail::NullType, typename T12 = detail::NullType,
+ typename T13 = detail::NullType, typename T14 = detail::NullType,
+ typename T15 = detail::NullType, typename T16 = detail::NullType,
+ typename T17 = detail::NullType, typename T18 = detail::NullType,
+ typename T19 = detail::NullType, typename T20 = detail::NullType,
+ typename T21 = detail::NullType, typename T22 = detail::NullType,
+ typename T23 = detail::NullType, typename T24 = detail::NullType,
+ typename T25 = detail::NullType, typename T26 = detail::NullType,
+ typename T27 = detail::NullType, typename T28 = detail::NullType,
+ typename T29 = detail::NullType, typename T30 = detail::NullType,
+ typename T31 = detail::NullType
+>
+struct make_kernel :
+ public detail::functionImplementation_<
+ T0, T1, T2, T3,
+ T4, T5, T6, T7,
+ T8, T9, T10, T11,
+ T12, T13, T14, T15,
+ T16, T17, T18, T19,
+ T20, T21, T22, T23,
+ T24, T25, T26, T27,
+ T28, T29, T30, T31
+ >
+{
+public:
+ typedef detail::KernelFunctorGlobal<
+ T0, T1, T2, T3,
+ T4, T5, T6, T7,
+ T8, T9, T10, T11,
+ T12, T13, T14, T15,
+ T16, T17, T18, T19,
+ T20, T21, T22, T23,
+ T24, T25, T26, T27,
+ T28, T29, T30, T31
+ > FunctorType;
+
+ make_kernel(
+ const Program& program,
+ const STRING_CLASS name,
+ cl_int * err = NULL) :
+ detail::functionImplementation_<
+ T0, T1, T2, T3,
+ T4, T5, T6, T7,
+ T8, T9, T10, T11,
+ T12, T13, T14, T15,
+ T16, T17, T18, T19,
+ T20, T21, T22, T23,
+ T24, T25, T26, T27,
+ T28, T29, T30, T31
+ >(
+ FunctorType(program, name, err))
+ {}
+
+ make_kernel(
+ const Kernel kernel) :
+ detail::functionImplementation_<
+ T0, T1, T2, T3,
+ T4, T5, T6, T7,
+ T8, T9, T10, T11,
+ T12, T13, T14, T15,
+ T16, T17, T18, T19,
+ T20, T21, T22, T23,
+ T24, T25, T26, T27,
+ T28, T29, T30, T31
+ >(
+ FunctorType(kernel))
+ {}
+};
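+
+/* A minimal usage sketch for make_kernel, assuming the caller already has a
+ * built program containing a kernel named "vadd", a command queue, and two
+ * cl::Buffer objects (all names here are placeholders):
+ *
+ * \code
+ * cl::make_kernel<cl::Buffer, cl::Buffer, int> vadd(program, "vadd");
+ * cl::Event event = vadd(
+ *     cl::EnqueueArgs(queue, cl::NDRange(numElements)),
+ *     inputBuffer, outputBuffer, 42);
+ * event.wait();
+ * \endcode
+ */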
+
+
+//----------------------------------------------------------------------------------------------------------------------
+
+#undef __ERR_STR
+#if !defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
+#undef __GET_DEVICE_INFO_ERR
+#undef __GET_PLATFORM_INFO_ERR
+#undef __GET_DEVICE_IDS_ERR
+#undef __GET_CONTEXT_INFO_ERR
+#undef __GET_EVENT_INFO_ERR
+#undef __GET_EVENT_PROFILE_INFO_ERR
+#undef __GET_MEM_OBJECT_INFO_ERR
+#undef __GET_IMAGE_INFO_ERR
+#undef __GET_SAMPLER_INFO_ERR
+#undef __GET_KERNEL_INFO_ERR
+#undef __GET_KERNEL_ARG_INFO_ERR
+#undef __GET_KERNEL_WORK_GROUP_INFO_ERR
+#undef __GET_PROGRAM_INFO_ERR
+#undef __GET_PROGRAM_BUILD_INFO_ERR
+#undef __GET_COMMAND_QUEUE_INFO_ERR
+
+#undef __CREATE_CONTEXT_ERR
+#undef __CREATE_CONTEXT_FROM_TYPE_ERR
+#undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
+
+#undef __CREATE_BUFFER_ERR
+#undef __CREATE_SUBBUFFER_ERR
+#undef __CREATE_IMAGE2D_ERR
+#undef __CREATE_IMAGE3D_ERR
+#undef __CREATE_SAMPLER_ERR
+#undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
+
+#undef __CREATE_USER_EVENT_ERR
+#undef __SET_USER_EVENT_STATUS_ERR
+#undef __SET_EVENT_CALLBACK_ERR
+#undef __SET_PRINTF_CALLBACK_ERR
+
+#undef __WAIT_FOR_EVENTS_ERR
+
+#undef __CREATE_KERNEL_ERR
+#undef __SET_KERNEL_ARGS_ERR
+#undef __CREATE_PROGRAM_WITH_SOURCE_ERR
+#undef __CREATE_PROGRAM_WITH_BINARY_ERR
+#undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
+#undef __BUILD_PROGRAM_ERR
+#undef __CREATE_KERNELS_IN_PROGRAM_ERR
+
+#undef __CREATE_COMMAND_QUEUE_ERR
+#undef __SET_COMMAND_QUEUE_PROPERTY_ERR
+#undef __ENQUEUE_READ_BUFFER_ERR
+#undef __ENQUEUE_WRITE_BUFFER_ERR
+#undef __ENQUEUE_READ_BUFFER_RECT_ERR
+#undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
+#undef __ENQEUE_COPY_BUFFER_ERR
+#undef __ENQEUE_COPY_BUFFER_RECT_ERR
+#undef __ENQUEUE_READ_IMAGE_ERR
+#undef __ENQUEUE_WRITE_IMAGE_ERR
+#undef __ENQUEUE_COPY_IMAGE_ERR
+#undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
+#undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
+#undef __ENQUEUE_MAP_BUFFER_ERR
+#undef __ENQUEUE_MAP_IMAGE_ERR
+#undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
+#undef __ENQUEUE_NDRANGE_KERNEL_ERR
+#undef __ENQUEUE_TASK_ERR
+#undef __ENQUEUE_NATIVE_KERNEL
+
+#undef __CL_EXPLICIT_CONSTRUCTORS
+
+#undef __UNLOAD_COMPILER_ERR
+#endif //__CL_USER_OVERRIDE_ERROR_STRINGS
+
+#undef __CL_FUNCTION_TYPE
+
+// Extensions
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_VERSION_1_1)
+#undef __INIT_CL_EXT_FCN_PTR
+#endif // #if defined(CL_VERSION_1_1)
+#undef __CREATE_SUB_DEVICES
+
+#if defined(USE_CL_DEVICE_FISSION)
+#undef __PARAM_NAME_DEVICE_FISSION
+#endif // USE_CL_DEVICE_FISSION
+
+#undef __DEFAULT_NOT_INITIALIZED
+#undef __DEFAULT_BEING_INITIALIZED
+#undef __DEFAULT_INITIALIZED
+
+} // namespace cl
+
+#ifdef _WIN32
+#pragma pop_macro("max")
+#endif // _WIN32
+
+#endif // CL_HPP_
diff --git a/include/CL/cl2.hpp b/include/CL/cl2.hpp
new file mode 100644
index 0000000000..c6cd8a716c
--- /dev/null
+++ b/include/CL/cl2.hpp
@@ -0,0 +1,9526 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2015 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/*! \file
+ *
+ * \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33),
+ * OpenCL 1.2 (rev 15) and OpenCL 2.0 (rev 29)
+ * \author Lee Howes and Bruce Merry
+ *
+ * Derived from the OpenCL 1.x C++ bindings written by
+ * Benedict R. Gaster, Laurent Morichetti and Lee Howes
+ * With additions and fixes from:
+ * Brian Cole, March 3rd 2010 and April 2012
+ * Matt Gruenke, April 2012.
+ * Bruce Merry, February 2013.
+ * Tom Deakin and Simon McIntosh-Smith, July 2013
+ * James Price, June-November 2015
+ *
+ * \version 2.0.8
+ * \date 2015-11-03
+ *
+ * Optional extension support
+ *
+ * cl_ext_device_fission
+ * #define CL_HPP_USE_CL_DEVICE_FISSION
+ * cl_khr_d3d10_sharing
+ * #define CL_HPP_USE_DX_INTEROP
+ * cl_khr_sub_groups
+ * #define CL_HPP_USE_CL_SUB_GROUPS_KHR
+ */
+
+/*! \mainpage
+ * \section intro Introduction
+ * For many large applications C++ is the language of choice and so it seems
+ * reasonable to define C++ bindings for OpenCL.
+ *
+ * The interface is contained within a single C++ header file \em cl2.hpp and all
+ * definitions are contained within the namespace \em cl. There is no additional
+ * requirement to include \em cl.h and to use either the C++ or original C
+ * bindings; it is enough to simply include \em cl2.hpp.
+ *
+ * The bindings themselves are lightweight and correspond closely to the
+ * underlying C API. Using the C++ bindings introduces no additional execution
+ * overhead.
+ *
+ * There are numerous compatibility, portability and memory management
+ * fixes in the new header as well as additional OpenCL 2.0 features.
+ * As a result the header is not directly backward compatible and for this
+ * reason we release it as cl2.hpp rather than a new version of cl.hpp.
+ *
+ *
+ * \section compatibility Compatibility
+ * Due to the evolution of the underlying OpenCL API the 2.0 C++ bindings
+ * include an updated approach to defining supported feature versions
+ * and the range of valid underlying OpenCL runtime versions supported.
+ *
+ * The combination of preprocessor macros CL_HPP_TARGET_OPENCL_VERSION and
+ * CL_HPP_MINIMUM_OPENCL_VERSION controls this range. These are three-digit
+ * decimal values representing OpenCL runtime versions. The default for
+ * the target is 200, representing OpenCL 2.0 and the minimum is also
+ * defined as 200. These settings would use 2.0 API calls only.
+ * If backward compatibility with a 1.2 runtime is required, the minimum
+ * version may be set to 120.
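+ *
+ * For example, a minimal sketch targeting OpenCL 2.0 while remaining
+ * compatible with 1.2 runtimes, as described above:
+ *
+ * \code
+ #define CL_HPP_TARGET_OPENCL_VERSION 200
+ #define CL_HPP_MINIMUM_OPENCL_VERSION 120
+ #include <CL/cl2.hpp>
+ * \endcode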
+ *
+ * Note that this is a compile-time setting, and so affects linking against
+ * a particular SDK version rather than the versioning of the loaded runtime.
+ *
+ * The earlier versions of the header included basic vector and string
+ * classes based loosely on STL versions. These were difficult to
+ * maintain and very rarely used. For the 2.0 header we now assume
+ * the presence of the standard library unless requested otherwise.
+ * We use std::array, std::vector, std::shared_ptr and std::string
+ * throughout to safely manage memory and reduce the chance of a
+ * recurrence of earlier memory management bugs.
+ *
+ * These classes are used through typedefs in the cl namespace:
+ * cl::array, cl::vector, cl::pointer and cl::string.
+ * In addition cl::allocate_pointer forwards to std::allocate_shared
+ * by default.
+ * In all cases these standard library classes can be replaced with
+ * custom interface-compatible versions using the CL_HPP_NO_STD_ARRAY,
+ * CL_HPP_NO_STD_VECTOR, CL_HPP_NO_STD_UNIQUE_PTR and
+ * CL_HPP_NO_STD_STRING macros.
+ *
+ * The OpenCL 1.x versions of the C++ bindings included a size_t wrapper
+ * class to interface with kernel enqueue. This caused unpleasant interactions
+ * with the standard size_t declaration and led to namespacing bugs.
+ * In the 2.0 version we have replaced this with a std::array-based interface.
+ * However, the old behaviour can be regained for backward compatibility
+ * using the CL_HPP_ENABLE_SIZE_T_COMPATIBILITY macro.
+ *
+ * Finally, the program construction interface used a clumsy vector-of-pairs
+ * design in the earlier versions. We have replaced that with a cleaner
+ * vector-of-vectors and vector-of-strings design. However, for backward
+ * compatibility old behaviour can be regained with the
+ * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY macro.
+ *
+ * In OpenCL 2.0, OpenCL C is not entirely backward compatible with
+ * earlier versions. As a result a flag must be passed to the OpenCL C
+ * compiler to request OpenCL 2.0 compilation of kernels, with 1.2 as
+ * the default in the absence of the flag.
+ * In some cases the C++ bindings automatically compile code for ease.
+ * For those cases the compilation defaults to OpenCL C 2.0.
+ * If this is not wanted, the CL_HPP_CL_1_2_DEFAULT_BUILD macro may
+ * be specified to assume 1.2 compilation.
+ * If more fine-grained decisions on a per-kernel basis are required
+ * then explicit build operations that take the flag should be used.
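+ *
+ * For example, a sketch that keeps the automatic program builds described
+ * above on OpenCL C 1.2:
+ *
+ * \code
+ #define CL_HPP_CL_1_2_DEFAULT_BUILD
+ #include <CL/cl2.hpp>
+ * \endcode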
+ *
+ *
+ * \section parameterization Parameters
+ * This header may be parameterized by a set of preprocessor macros.
+ * CL_HPP_TARGET_OPENCL_VERSION
+ * - Defines the target OpenCL runtime version to build the header against.
+ * Defaults to 200, representing OpenCL 2.0.
+ * CL_HPP_NO_STD_STRING
+ * - Do not use the standard library string class.
+ * cl::string is not defined and may be defined by the user before
+ * cl2.hpp is included.
+ * CL_HPP_NO_STD_VECTOR
+ * - Do not use the standard library vector class.
+ * cl::vector is not defined and may be defined by the user before
+ * cl2.hpp is included.
+ * CL_HPP_NO_STD_ARRAY
+ * - Do not use the standard library array class.
+ * cl::array is not defined and may be defined by the user before
+ * cl2.hpp is included.
+ * CL_HPP_NO_STD_UNIQUE_PTR
+ * - Do not use the standard library unique_ptr class.
+ * cl::pointer and the cl::allocate_pointer function are not defined
+ * and may be defined by the user before cl2.hpp is included.
+ * CL_HPP_ENABLE_DEVICE_FISSION
+ * - Enables device fission for OpenCL 1.2 platforms
+ * CL_HPP_ENABLE_EXCEPTIONS
+ * - Enable exceptions for use in the C++ bindings header.
+ * This is the preferred error handling mechanism but is not required.
+ * CL_HPP_ENABLE_SIZE_T_COMPATIBILITY
+ * - Backward compatibility option to support cl.hpp-style size_t class.
+ * Replaces the updated std::array derived version and removal of size_t
+ * from the namespace. Note that in this case the new size_t class
+ * is placed in the cl::compatibility namespace and thus requires
+ * an additional using declaration for direct backward compatibility.
+ * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY
+ * - Enable older vector of pairs interface for construction of programs.
+ * CL_HPP_CL_1_2_DEFAULT_BUILD
+ * - Default to OpenCL C 1.2 compilation rather than OpenCL C 2.0
+ * - applies to use of cl::Program construction and other program build variants.
+ *
+ *
+ * \section example Example
+ *
+ * The following example shows a general use case for the C++
+ * bindings, including support for the optional exception feature and
+ * also the supplied vector and string classes; see the sections above for
+ * descriptions of these features.
+ *
+ * \code
+ #define CL_HPP_ENABLE_EXCEPTIONS
+ #define CL_HPP_TARGET_OPENCL_VERSION 200
+
+ #include <CL/cl2.hpp>
+ #include <iostream>
+ #include <vector>
+ #include <memory>
+ #include <algorithm>
+
+ const int numElements = 32;
+
+ int main(void)
+ {
+ // Filter for a 2.0 platform and set it as the default
+ std::vector<cl::Platform> platforms;
+ cl::Platform::get(&platforms);
+ cl::Platform plat;
+ for (auto &p : platforms) {
+ std::string platver = p.getInfo<CL_PLATFORM_VERSION>();
+ if (platver.find("OpenCL 2.") != std::string::npos) {
+ plat = p;
+ }
+ }
+ if (plat() == 0) {
+ std::cout << "No OpenCL 2.0 platform found.";
+ return -1;
+ }
+
+ cl::Platform newP = cl::Platform::setDefault(plat);
+ if (newP != plat) {
+ std::cout << "Error setting default platform.";
+ return -1;
+ }
+
+ std::string kernel1{
+ "global int globalA;"
+ "kernel void updateGlobal(){"
+ " globalA = 75;"
+ "}"};
+ std::string kernel2{
+ "typedef struct { global int *bar; } Foo; kernel void vectorAdd(global const Foo* aNum, global const int *inputA, global const int *inputB, global int *output, int val, write_only pipe int outPipe, queue_t childQueue){"
+ " output[get_global_id(0)] = inputA[get_global_id(0)] + inputB[get_global_id(0)] + val + *(aNum->bar);"
+ " write_pipe(outPipe, &val);"
+ " queue_t default_queue = get_default_queue(); "
+ " ndrange_t ndrange = ndrange_1D(get_global_size(0)/2, get_global_size(0)/2); "
+ // Have a child kernel write into third quarter of output
+ " enqueue_kernel(default_queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange, "
+ " ^{"
+ " output[get_global_size(0)*2 + get_global_id(0)] = inputA[get_global_size(0)*2+get_global_id(0)] + inputB[get_global_size(0)*2+get_global_id(0)] + globalA;"
+ " });"
+ // Have a child kernel write into last quarter of output
+ " enqueue_kernel(childQueue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange, "
+ " ^{"
+ " output[get_global_size(0)*3 + get_global_id(0)] = inputA[get_global_size(0)*3 + get_global_id(0)] + inputB[get_global_size(0)*3 + get_global_id(0)] + globalA + 2;"
+ " });"
+ "}" };
+
+ // New simpler string interface style
+ std::vector<std::string> programStrings {kernel1, kernel2};
+
+ cl::Program vectorAddProgram(
+ programStrings);
+ try {
+ vectorAddProgram.build("-cl-std=CL2.0");
+ }
+ catch (...) {
+ // Print build info for all devices
+ cl_int buildErr = CL_SUCCESS;
+ auto buildInfo = vectorAddProgram.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&buildErr);
+ for (auto &pair : buildInfo) {
+ std::cerr << pair.second << std::endl << std::endl;
+ }
+
+ return 1;
+ }
+
+ typedef struct { int *bar; } Foo;
+
+ // Get and run kernel that initializes the program-scope global
+ // A test for kernels that take no arguments
+ auto program2Kernel =
+ cl::KernelFunctor<>(vectorAddProgram, "updateGlobal");
+ program2Kernel(
+ cl::EnqueueArgs(
+ cl::NDRange(1)));
+
+ //////////////////
+ // SVM allocations
+
+ cl::pointer<int> anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
+ *anSVMInt = 5;
+ cl::SVMAllocator<int, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
+ auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
+ fooPointer->bar = anSVMInt.get();
+ cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
+ std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
+ cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
+
+ //
+ //////////////
+
+ // Traditional cl_mem allocations
+ std::vector<int> output(numElements, 0xdeadbeef);
+ cl::Buffer outputBuffer(begin(output), end(output), false);
+ cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
+
+ // Default command queue, also passed in as a parameter
+ cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
+ cl::Context::getDefault(), cl::Device::getDefault());
+
+ auto vectorAddKernel =
+ cl::KernelFunctor<
+ decltype(fooPointer)&,
+ int*,
+ cl::coarse_svm_vector<int>&,
+ cl::Buffer,
+ int,
+ cl::Pipe&,
+ cl::DeviceCommandQueue
+ >(vectorAddProgram, "vectorAdd");
+
+ // Ensure that the additional SVM pointer is available to the kernel
+ // This one was not passed as a parameter
+ vectorAddKernel.setSVMPointers(anSVMInt);
+
+ // Hand control of coarse allocations to runtime
+ cl::enqueueUnmapSVM(anSVMInt);
+ cl::enqueueUnmapSVM(fooPointer);
+ cl::unmapSVM(inputB);
+
+ cl_int error;
+ vectorAddKernel(
+ cl::EnqueueArgs(
+ cl::NDRange(numElements/2),
+ cl::NDRange(numElements/2)),
+ fooPointer,
+ inputA.data(),
+ inputB,
+ outputBuffer,
+ 3,
+ aPipe,
+ defaultDeviceQueue,
+ error
+ );
+
+ cl::copy(outputBuffer, begin(output), end(output));
+
+ cl::Device d = cl::Device::getDefault();
+
+ std::cout << "Output:\n";
+    for (int i = 0; i < numElements; ++i) {
+ std::cout << "\t" << output[i] << "\n";
+ }
+ std::cout << "\n\n";
+
+ return 0;
+ }
+ *
+ * \endcode
+ *
+ */
+#ifndef CL_HPP_
+#define CL_HPP_
+
+#ifdef __GNUC__
+#pragma GCC system_header
+#endif
+
+/* Handle deprecated preprocessor definitions. In each case, we only check for
+ * the old name if the new name is not defined, so that user code can define
+ * both and hence work with either version of the bindings.
+ */
+#if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
+# pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
+# define CL_HPP_USE_DX_INTEROP
+#endif
+#if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
+# pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
+# define CL_HPP_USE_CL_DEVICE_FISSION
+#endif
+#if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
+# pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
+# define CL_HPP_ENABLE_EXCEPTIONS
+#endif
+#if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
+# pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated. Define CL_HPP_NO_STD_VECTOR instead")
+# define CL_HPP_NO_STD_VECTOR
+#endif
+#if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING)
+# pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead")
+# define CL_HPP_NO_STD_STRING
+#endif
+#if defined(VECTOR_CLASS)
+# pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead")
+#endif
+#if defined(STRING_CLASS)
+# pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.")
+#endif
+#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS)
+# pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead")
+# define CL_HPP_USER_OVERRIDE_ERROR_STRINGS
+#endif
+
+/* Warn about features that are no longer supported
+ */
+#if defined(__USE_DEV_VECTOR)
+# pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors")
+#endif
+#if defined(__USE_DEV_STRING)
+# pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors")
+#endif
+
+/* Detect which version to target */
+#if !defined(CL_HPP_TARGET_OPENCL_VERSION)
+# pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 200 (OpenCL 2.0)")
+# define CL_HPP_TARGET_OPENCL_VERSION 200
+#endif
+#if CL_HPP_TARGET_OPENCL_VERSION != 100 && CL_HPP_TARGET_OPENCL_VERSION != 110 && CL_HPP_TARGET_OPENCL_VERSION != 120 && CL_HPP_TARGET_OPENCL_VERSION != 200
+# pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 200")
+# undef CL_HPP_TARGET_OPENCL_VERSION
+# define CL_HPP_TARGET_OPENCL_VERSION 200
+#endif
+
+#if !defined(CL_HPP_MINIMUM_OPENCL_VERSION)
+# define CL_HPP_MINIMUM_OPENCL_VERSION 200
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && CL_HPP_MINIMUM_OPENCL_VERSION != 110 && CL_HPP_MINIMUM_OPENCL_VERSION != 120 && CL_HPP_MINIMUM_OPENCL_VERSION != 200
+# pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 100")
+# undef CL_HPP_MINIMUM_OPENCL_VERSION
+# define CL_HPP_MINIMUM_OPENCL_VERSION 100
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
+# error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
+#endif
+
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
+# define CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#endif
+
+#ifdef _WIN32
+
+#include <malloc.h>
+
+#if defined(CL_HPP_USE_DX_INTEROP)
+#include <CL/cl_d3d10.h>
+#include <CL/cl_dx9_media_sharing.h>
+#endif
+#endif // _WIN32
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif // _MSC_VER
+
+ // Check for a valid C++ version
+
+// Need to do both tests here because __cplusplus is not updated to reflect
+// C++11 support in Visual Studio
+#if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#error Visual Studio 2013 or another C++11-supporting compiler required
+#endif
+
+//
+#if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+#include <CL/cl_ext.h>
+#endif
+
+#if defined(__APPLE__) || defined(__MACOSX)
+#include <OpenCL/opencl.h>
+#else
+#include <CL/opencl.h>
+#endif // !__APPLE__
+
+#if (__cplusplus >= 201103L)
+#define CL_HPP_NOEXCEPT_ noexcept
+#else
+#define CL_HPP_NOEXCEPT_
+#endif
+
+#if defined(_MSC_VER)
+# define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
+#else
+# define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
+#endif // !_MSC_VER
+
+// Define deprecated prefixes and suffixes to ensure compilation
+// in case they are not pre-defined
+#if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+
+#if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
+#define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
+#endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
+#if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
+#define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
+#endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
+
+#if !defined(CL_CALLBACK)
+#define CL_CALLBACK
+#endif //CL_CALLBACK
+
+#include <utility>
+#include <limits>
+#include <iterator>
+#include <mutex>
+#include <cstring>
+#include <functional>
+
+
+// Define a size_type to represent a correctly resolved size_t
+#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+ using size_type = ::size_t;
+} // namespace cl
+#else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+ using size_type = size_t;
+} // namespace cl
+#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+#include <exception>
+#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
+
+#if !defined(CL_HPP_NO_STD_VECTOR)
+#include <vector>
+namespace cl {
+ template < class T, class Alloc = std::allocator<T> >
+ using vector = std::vector<T, Alloc>;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_VECTOR)
+
+#if !defined(CL_HPP_NO_STD_STRING)
+#include <string>
+namespace cl {
+ using string = std::string;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_STRING)
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+#include <memory>
+namespace cl {
+ // Replace unique_ptr and allocate_pointer for internal use
+ // to allow user to replace them
+ template<class T, class D>
+ using pointer = std::unique_ptr<T, D>;
+} // namespace cl
+#endif
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+#if !defined(CL_HPP_NO_STD_ARRAY)
+#include <array>
+namespace cl {
+ template < class T, size_type N >
+ using array = std::array<T, N>;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_ARRAY)
+
+// Define size_type appropriately to allow backward-compatibility
+// use of the old size_t interface class
+#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+ namespace compatibility {
+ /*! \brief class used to interface between C++ and
+ * OpenCL C calls that require arrays of size_t values, whose
+ * size is known statically.
+ */
+ template <int N>
+ class size_t
+ {
+ private:
+ size_type data_[N];
+
+ public:
+ //! \brief Initialize size_t to all 0s
+ size_t()
+ {
+ for (int i = 0; i < N; ++i) {
+ data_[i] = 0;
+ }
+ }
+
+ size_t(const array<size_type, N> &rhs)
+ {
+ for (int i = 0; i < N; ++i) {
+ data_[i] = rhs[i];
+ }
+ }
+
+ size_type& operator[](int index)
+ {
+ return data_[index];
+ }
+
+ const size_type& operator[](int index) const
+ {
+ return data_[index];
+ }
+
+ //! \brief Conversion operator to T*.
+ operator size_type* () { return data_; }
+
+ //! \brief Conversion operator to const T*.
+ operator const size_type* () const { return data_; }
+
+ operator array<size_type, N>() const
+ {
+ array<size_type, N> ret;
+
+ for (int i = 0; i < N; ++i) {
+ ret[i] = data_[i];
+ }
+ return ret;
+ }
+ };
+ } // namespace compatibility
+
+ template<int N>
+ using size_t = compatibility::size_t<N>;
+} // namespace cl
+#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
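+
+/* A minimal sketch of the compatibility wrapper above, assuming
+ * CL_HPP_ENABLE_SIZE_T_COMPATIBILITY was defined before inclusion:
+ *
+ * \code
+ * cl::size_t<3> region;   // alias for cl::compatibility::size_t<3>
+ * region[0] = 512;
+ * region[1] = 512;
+ * region[2] = 1;
+ * cl::array<cl::size_type, 3> asArray = region; // via the conversion operator
+ * \endcode
+ */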
+
+// Helper alias to avoid confusing the macros
+namespace cl {
+ namespace detail {
+ using size_t_array = array<size_type, 3>;
+ } // namespace detail
+} // namespace cl
+
+
+/*! \namespace cl
+ *
+ * \brief The OpenCL C++ bindings are defined within this namespace.
+ *
+ */
+namespace cl {
+ class Memory;
+
+#define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
+ if (!pfn_##name) { \
+ pfn_##name = (PFN_##name) \
+ clGetExtensionFunctionAddress(#name); \
+ if (!pfn_##name) { \
+ } \
+ }
+
+#define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
+ if (!pfn_##name) { \
+ pfn_##name = (PFN_##name) \
+ clGetExtensionFunctionAddressForPlatform(platform, #name); \
+ if (!pfn_##name) { \
+ } \
+ }
+
+ class Program;
+ class Device;
+ class Context;
+ class CommandQueue;
+ class DeviceCommandQueue;
+ class Memory;
+ class Buffer;
+ class Pipe;
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ /*! \brief Exception class
+ *
+ * This may be thrown by API functions when CL_HPP_ENABLE_EXCEPTIONS is defined.
+ */
+ class Error : public std::exception
+ {
+ private:
+ cl_int err_;
+ const char * errStr_;
+ public:
+ /*! \brief Create a new CL error exception for a given error code
+ * and corresponding message.
+ *
+ * \param err error code value.
+ *
+ * \param errStr a descriptive string that must remain in scope until
+ * handling of the exception has concluded. If set, it
+ * will be returned by what().
+ */
+ Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
+ {}
+
+ ~Error() throw() {}
+
+ /*! \brief Get error string associated with exception
+ *
+ * \return A memory pointer to the error message string.
+ */
+ virtual const char * what() const throw ()
+ {
+ if (errStr_ == NULL) {
+ return "empty";
+ }
+ else {
+ return errStr_;
+ }
+ }
+
+ /*! \brief Get error code associated with exception
+ *
+ * \return The error code.
+ */
+ cl_int err(void) const { return err_; }
+ };
+#define CL_HPP_ERR_STR_(x) #x
+#else
+#define CL_HPP_ERR_STR_(x) NULL
+#endif // CL_HPP_ENABLE_EXCEPTIONS
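+
+/* A minimal error-handling sketch, assuming CL_HPP_ENABLE_EXCEPTIONS is
+ * defined so that failing calls throw cl::Error (the queue here is a
+ * placeholder):
+ *
+ * \code
+ * try {
+ *     cl::CommandQueue queue = cl::CommandQueue::getDefault();
+ *     queue.finish();
+ * } catch (const cl::Error &e) {
+ *     std::cerr << e.what() << " (" << e.err() << ")" << std::endl;
+ * }
+ * \endcode
+ */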
+
+
+namespace detail
+{
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+static inline cl_int errHandler (
+ cl_int err,
+ const char * errStr = NULL)
+{
+ if (err != CL_SUCCESS) {
+ throw Error(err, errStr);
+ }
+ return err;
+}
+#else
+static inline cl_int errHandler (cl_int err, const char * errStr = NULL)
+{
+ (void) errStr; // suppress unused variable warning
+ return err;
+}
+#endif // CL_HPP_ENABLE_EXCEPTIONS
+}
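+
+// A usage sketch for the helper above: wrap a raw return code so that, with
+// exceptions enabled, a failure throws cl::Error and otherwise the code is
+// simply passed through, e.g. (queue is a placeholder):
+//   cl_int err = detail::errHandler(clFinish(queue()), __FINISH_ERR);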
+
+
+
+//! \cond DOXYGEN_DETAIL
+#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
+#define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo)
+#define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo)
+#define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs)
+#define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs)
+#define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo)
+#define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo)
+#define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfileInfo)
+#define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo)
+#define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo)
+#define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo)
+#define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo)
+#define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo)
+#define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo)
+#define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo)
+
+#define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext)
+#define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType)
+#define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats)
+
+#define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer)
+#define __COPY_ERR CL_HPP_ERR_STR_(cl::copy)
+#define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer)
+#define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
+#define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer)
+#define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage)
+#define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture)
+#define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback)
+
+#define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent)
+#define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus)
+#define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback)
+#define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents)
+
+#define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel)
+#define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg)
+#define __CREATE_PROGRAM_WITH_SOURCE_ERR CL_HPP_ERR_STR_(clCreateProgramWithSource)
+#define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram)
+#define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram)
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+#define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties)
+#define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+#define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty)
+#define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer)
+#define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect)
+#define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer)
+#define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect)
+#define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer)
+#define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect)
+#define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer)
+#define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage)
+#define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage)
+#define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage)
+#define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage)
+#define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer)
+#define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage)
+#define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer)
+#define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage)
+#define __ENQUEUE_UNMAP_MEM_OBJECT_ERR      CL_HPP_ERR_STR_(clEnqueueUnmapMemObject)
+#define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel)
+#define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+#define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects)
+#define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects)
+
+#define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe)
+#define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo)
+
+
+#define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object)
+#define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object)
+#define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush)
+#define __FINISH_ERR CL_HPP_ERR_STR_(clFinish)
+#define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error)
+
+/**
+ * CL 1.2 version that uses device fission.
+ */
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
+#else
+#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
+#define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
+#define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
+#define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
+#define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
+#define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
+#define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
+#define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+/**
+ * Deprecated APIs for 2.0
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+#define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
+#define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
+#define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+
+/**
+ * CL 1.2 marker and barrier commands
+ */
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
+#define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+#endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
+//! \endcond
+
+
+namespace detail {
+
+// Generic getInfoHelper. The final parameter is used to guide overload
+// resolution: the actual parameter passed is an int, which makes this
+// a worse conversion sequence than a specialization that declares the
+// parameter as an int.
+template<typename Functor, typename T>
+inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
+{
+ return f(name, sizeof(T), param, NULL);
+}
+
+// Specialized for getInfo<CL_PROGRAM_BINARIES>
+// Assumes that the output vector was correctly resized on the way in
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
+{
+ if (name != CL_PROGRAM_BINARIES) {
+ return CL_INVALID_VALUE;
+ }
+ if (param) {
+ // Create array of pointers, calculate total size and pass pointer array in
+ size_type numBinaries = param->size();
+ vector<unsigned char*> binariesPointers(numBinaries);
+
+ size_type totalSize = 0;
+ for (size_type i = 0; i < numBinaries; ++i)
+ {
+ binariesPointers[i] = (*param)[i].data();
+ totalSize += (*param)[i].size();
+ }
+
+ cl_int err = f(name, totalSize, binariesPointers.data(), NULL);
+
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ }
+
+
+ return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for vector params
+template <typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
+{
+ size_type required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ const size_type elements = required / sizeof(T);
+
+ // Temporary to avoid changing param on an error
+ vector<T> localData(elements);
+ err = f(name, required, localData.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ if (param) {
+ *param = std::move(localData);
+ }
+
+ return CL_SUCCESS;
+}
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
+template <typename Func, typename T>
+inline cl_int getInfoHelper(
+ Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
+{
+ size_type required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ const size_type elements = required / sizeof(typename T::cl_type);
+
+ vector<typename T::cl_type> value(elements);
+ err = f(name, required, value.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ if (param) {
+ // Assign to convert CL type to T for each element
+ param->resize(elements);
+
+ // Assign to param, constructing with retain behaviour
+ // to correctly capture each underlying CL object
+ for (size_type i = 0; i < elements; i++) {
+ (*param)[i] = T(value[i], true);
+ }
+ }
+ return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for string params
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
+{
+ size_type required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ // std::string has a constant data member
+ // a char vector does not
+ if (required > 0) {
+ vector<char> value(required);
+ err = f(name, required, value.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ if (param) {
+ param->assign(begin(value), prev(end(value)));
+ }
+ }
+ else if (param) {
+ param->assign("");
+ }
+ return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for size_t-array params
+template <typename Func, size_type N>
+inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
+{
+ size_type required;
+ cl_int err = f(name, 0, NULL, &required);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ size_type elements = required / sizeof(size_type);
+ vector<size_type> value(elements, 0);
+
+ err = f(name, required, value.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+
+ // Bound the copy with N to prevent overruns
+    // if more than N elements were returned
+ if (elements > N) {
+ elements = N;
+ }
+ for (size_type i = 0; i < elements; ++i) {
+ (*param)[i] = value[i];
+ }
+
+ return CL_SUCCESS;
+}
+
+template<typename T> struct ReferenceHandler;
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
+template<typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
+{
+ typename T::cl_type value;
+ cl_int err = f(name, sizeof(value), &value, NULL);
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ *param = value;
+ if (value != NULL)
+ {
+ err = param->retain();
+ if (err != CL_SUCCESS) {
+ return err;
+ }
+ }
+ return CL_SUCCESS;
+}
+
+#define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
+ F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
+ F(cl_platform_info, CL_PLATFORM_VERSION, string) \
+ F(cl_platform_info, CL_PLATFORM_NAME, string) \
+ F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
+ F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
+ \
+ F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
+ F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
+ F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
+ F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
+ F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
+ F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
+ F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
+ F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
+ F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint)\
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
+ F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
+ F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
+ F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
+ F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
+ F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
+ F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
+ F(cl_device_info, CL_DEVICE_NAME, string) \
+ F(cl_device_info, CL_DEVICE_VENDOR, string) \
+ F(cl_device_info, CL_DRIVER_VERSION, string) \
+ F(cl_device_info, CL_DEVICE_PROFILE, string) \
+ F(cl_device_info, CL_DEVICE_VERSION, string) \
+ F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
+ \
+ F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
+ F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
+ F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
+ \
+ F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
+ F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
+ F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
+ F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
+ \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
+ F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
+ \
+ F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
+ F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
+ F(cl_mem_info, CL_MEM_SIZE, size_type) \
+ F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
+ F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
+ F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
+ F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
+ \
+ F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
+ F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
+ F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
+ F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
+ F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
+ F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
+ F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
+ \
+ F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
+ F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
+ F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
+ F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
+ F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
+ \
+ F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
+ F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
+ F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
+ F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
+ F(cl_program_info, CL_PROGRAM_SOURCE, string) \
+ F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
+ F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
+ \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
+ \
+ F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
+ F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
+ F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
+ F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
+ F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
+ \
+ F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
+ F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
+ F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
+ \
+ F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
+ F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
+ F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
+ F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
+
+
+#define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
+ F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
+ F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
+ F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
+ F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
+ \
+ F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
+ F(cl_mem_info, CL_MEM_OFFSET, size_type) \
+ \
+ F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
+ F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
+ \
+ F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
+
+#define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
+ F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
+ F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
+ \
+ F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
+ \
+ F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
+ \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
+ F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
+ \
+ F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
+ F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
+ F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
+ F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, size_type) \
+ F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
+ F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
+ \
+ F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
+ F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
+ F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
+
+#define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
+ F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
+ F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
+ F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
+ F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
+ F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
+ F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
+ F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
+ F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
+ F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
+ F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
+
+#define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
+ F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
+ F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
+ F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
+ F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT , cl_uint) \
+ F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
+
+template <typename enum_type, cl_int Name>
+struct param_traits {};
+
+#define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
+struct token; \
+template<> \
+struct param_traits<detail:: token,param_name> \
+{ \
+ enum { value = param_name }; \
+ typedef T param_type; \
+};
+
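+/* For illustration: applied to the tuple
+ * F(cl_device_info, CL_DEVICE_NAME, string), the macro above expands to
+ * roughly:
+ *
+ *   struct cl_device_info;
+ *   template<>
+ *   struct param_traits<detail::cl_device_info, CL_DEVICE_NAME>
+ *   {
+ *       enum { value = CL_DEVICE_NAME };
+ *       typedef string param_type;
+ *   };
+ *
+ * which is what lets getInfo<CL_DEVICE_NAME>() deduce a string return type
+ * at compile time.
+ */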
+CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+// Flags deprecated in OpenCL 2.0
+#define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
+ F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)
+
+#define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
+ F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)
+
+#define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
+ F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
+
+// Include deprecated query flags based on versions
+// Only include the deprecated 1.0 flags if 2.0 is not targeted, as there is an enum clash
+#if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
+#if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+#if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+
+#if defined(CL_HPP_USE_CL_DEVICE_FISSION)
+CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
+#endif // CL_HPP_USE_CL_DEVICE_FISSION
+
+#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
+#endif
+
+#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
+#endif
+
+#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
+#endif
+#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
+#endif
+
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_WARP_SIZE_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_GPU_OVERLAP_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
+#endif
+
+// Convenience functions
+
+template <typename Func, typename T>
+inline cl_int
+getInfo(Func f, cl_uint name, T* param)
+{
+ return getInfoHelper(f, name, param, 0);
+}
+
+template <typename Func, typename Arg0>
+struct GetInfoFunctor0
+{
+ Func f_; const Arg0& arg0_;
+ cl_int operator ()(
+ cl_uint param, size_type size, void* value, size_type* size_ret)
+ { return f_(arg0_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename Arg1>
+struct GetInfoFunctor1
+{
+ Func f_; const Arg0& arg0_; const Arg1& arg1_;
+ cl_int operator ()(
+ cl_uint param, size_type size, void* value, size_type* size_ret)
+ { return f_(arg0_, arg1_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
+{
+ GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
+ return getInfoHelper(f0, name, param, 0);
+}
+
+template <typename Func, typename Arg0, typename Arg1, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
+{
+ GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
+ return getInfoHelper(f0, name, param, 0);
+}
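+
+/* Illustrative sketch: these functors adapt queries that take extra leading
+ * arguments to the uniform (param, size, value, size_ret) shape expected by
+ * getInfoHelper. A build-log query, for example, needs both a program and a
+ * device, so it is bound through GetInfoFunctor1 (assuming cl_program
+ * `program` and cl_device_id `device` handles are in scope):
+ *
+ *   GetInfoFunctor1<decltype(&::clGetProgramBuildInfo),
+ *                   cl_program, cl_device_id>
+ *       f0 = { &::clGetProgramBuildInfo, program, device };
+ *   // f0(CL_PROGRAM_BUILD_LOG, size, value, &sizeRet) forwards to
+ *   // clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, ...)
+ *
+ * This is exactly what the two- and three-argument getInfo overloads above
+ * do internally.
+ */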
+
+
+template<typename T>
+struct ReferenceHandler
+{ };
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+/**
+ * OpenCL 1.2 devices do have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+ /**
+ * Retain the device.
+ * \param device A valid device created using createSubDevices
+ * \return
+ * CL_SUCCESS if the function executed successfully.
+ * CL_INVALID_DEVICE if device was not a valid subdevice
+ * CL_OUT_OF_RESOURCES
+ * CL_OUT_OF_HOST_MEMORY
+ */
+ static cl_int retain(cl_device_id device)
+ { return ::clRetainDevice(device); }
+ /**
+ * Release the device.
+ * \param device A valid device created using createSubDevices
+ * \return
+ * CL_SUCCESS if the function executed successfully.
+ * CL_INVALID_DEVICE if device was not a valid subdevice
+ * CL_OUT_OF_RESOURCES
+ * CL_OUT_OF_HOST_MEMORY
+ */
+ static cl_int release(cl_device_id device)
+ { return ::clReleaseDevice(device); }
+};
+#else // CL_HPP_TARGET_OPENCL_VERSION >= 120
+/**
+ * OpenCL 1.1 devices do not have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+ // cl_device_id does not have retain().
+ static cl_int retain(cl_device_id)
+ { return CL_SUCCESS; }
+ // cl_device_id does not have release().
+ static cl_int release(cl_device_id)
+ { return CL_SUCCESS; }
+};
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+template <>
+struct ReferenceHandler<cl_platform_id>
+{
+ // cl_platform_id does not have retain().
+ static cl_int retain(cl_platform_id)
+ { return CL_SUCCESS; }
+ // cl_platform_id does not have release().
+ static cl_int release(cl_platform_id)
+ { return CL_SUCCESS; }
+};
+
+template <>
+struct ReferenceHandler<cl_context>
+{
+ static cl_int retain(cl_context context)
+ { return ::clRetainContext(context); }
+ static cl_int release(cl_context context)
+ { return ::clReleaseContext(context); }
+};
+
+template <>
+struct ReferenceHandler<cl_command_queue>
+{
+ static cl_int retain(cl_command_queue queue)
+ { return ::clRetainCommandQueue(queue); }
+ static cl_int release(cl_command_queue queue)
+ { return ::clReleaseCommandQueue(queue); }
+};
+
+template <>
+struct ReferenceHandler<cl_mem>
+{
+ static cl_int retain(cl_mem memory)
+ { return ::clRetainMemObject(memory); }
+ static cl_int release(cl_mem memory)
+ { return ::clReleaseMemObject(memory); }
+};
+
+template <>
+struct ReferenceHandler<cl_sampler>
+{
+ static cl_int retain(cl_sampler sampler)
+ { return ::clRetainSampler(sampler); }
+ static cl_int release(cl_sampler sampler)
+ { return ::clReleaseSampler(sampler); }
+};
+
+template <>
+struct ReferenceHandler<cl_program>
+{
+ static cl_int retain(cl_program program)
+ { return ::clRetainProgram(program); }
+ static cl_int release(cl_program program)
+ { return ::clReleaseProgram(program); }
+};
+
+template <>
+struct ReferenceHandler<cl_kernel>
+{
+ static cl_int retain(cl_kernel kernel)
+ { return ::clRetainKernel(kernel); }
+ static cl_int release(cl_kernel kernel)
+ { return ::clReleaseKernel(kernel); }
+};
+
+template <>
+struct ReferenceHandler<cl_event>
+{
+ static cl_int retain(cl_event event)
+ { return ::clRetainEvent(event); }
+ static cl_int release(cl_event event)
+ { return ::clReleaseEvent(event); }
+};
+
+
+// Extracts version number with major in the upper 16 bits, minor in the lower 16
+static cl_uint getVersion(const vector<char> &versionInfo)
+{
+ int highVersion = 0;
+ int lowVersion = 0;
+ int index = 7; // skip the leading "OpenCL " prefix (7 characters)
+ while(versionInfo[index] != '.' ) {
+ highVersion *= 10;
+ highVersion += versionInfo[index]-'0';
+ ++index;
+ }
+ ++index;
+ while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
+ lowVersion *= 10;
+ lowVersion += versionInfo[index]-'0';
+ ++index;
+ }
+ return (highVersion << 16) | lowVersion;
+}
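+
+/* Worked example: for a version string such as "OpenCL 1.2 (vendor info)",
+ * parsing starts just past the "OpenCL " prefix, so the result is
+ * (1 << 16) | 2 == 0x00010002. Comparing these encoded values orders
+ * versions correctly because the major number occupies the upper 16 bits.
+ */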
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+static cl_uint getPlatformVersion(cl_platform_id platform)
+{
+ size_type size = 0;
+ clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
+
+ vector<char> versionInfo(size);
+ clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
+ return getVersion(versionInfo);
+}
+
+static cl_uint getDevicePlatformVersion(cl_device_id device)
+{
+ cl_platform_id platform;
+ clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
+ return getPlatformVersion(platform);
+}
+
+static cl_uint getContextPlatformVersion(cl_context context)
+{
+ // The platform cannot be queried directly, so we first have to grab a
+ // device and query its platform
+ size_type size = 0;
+ clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
+ if (size == 0)
+ return 0;
+ vector<cl_device_id> devices(size/sizeof(cl_device_id));
+ clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
+ return getDevicePlatformVersion(devices[0]);
+}
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+
+template <typename T>
+class Wrapper
+{
+public:
+ typedef T cl_type;
+
+protected:
+ cl_type object_;
+
+public:
+ Wrapper() : object_(NULL) { }
+
+ Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
+ {
+ if (retainObject) {
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+ }
+
+ ~Wrapper()
+ {
+ if (object_ != NULL) { release(); }
+ }
+
+ Wrapper(const Wrapper<cl_type>& rhs)
+ {
+ object_ = rhs.object_;
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+
+ Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
+ {
+ object_ = rhs.object_;
+ rhs.object_ = NULL;
+ }
+
+ Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
+ {
+ if (this != &rhs) {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs.object_;
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
+ {
+ if (this != &rhs) {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs.object_;
+ rhs.object_ = NULL;
+ }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (const cl_type &rhs)
+ {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs;
+ return *this;
+ }
+
+ const cl_type& operator ()() const { return object_; }
+
+ cl_type& operator ()() { return object_; }
+
+ const cl_type get() const { return object_; }
+
+ cl_type get() { return object_; }
+
+
+protected:
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+ cl_int retain() const
+ {
+ if (object_ != nullptr) {
+ return ReferenceHandler<cl_type>::retain(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+
+ cl_int release() const
+ {
+ if (object_ != nullptr) {
+ return ReferenceHandler<cl_type>::release(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+};
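+
+/* Illustrative sketch of the ownership rules implemented above (internal
+ * detail; assumes a valid cl_context handle named `raw`):
+ *
+ *   detail::Wrapper<cl_context> a(raw, true);    // retains: refcount +1
+ *   detail::Wrapper<cl_context> b(a);            // copy retains again
+ *   detail::Wrapper<cl_context> c(std::move(a)); // move steals, no retain
+ *   // each destructor releases once, so the refcount balances out
+ */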
+
+template <>
+class Wrapper<cl_device_id>
+{
+public:
+ typedef cl_device_id cl_type;
+
+protected:
+ cl_type object_;
+ bool referenceCountable_;
+
+ static bool isReferenceCountable(cl_device_id device)
+ {
+ bool retVal = false;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ if (device != NULL) {
+ int version = getDevicePlatformVersion(device);
+ // Devices are individually reference countable only from OpenCL 1.2
+ // onwards, i.e. when the encoded (major << 16) | minor version
+ // exceeds 1.1
+ if(version > ((1 << 16) + 1)) {
+ retVal = true;
+ }
+ }
+#else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ retVal = true;
+#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+ return retVal;
+ }
+
+public:
+ Wrapper() : object_(NULL), referenceCountable_(false)
+ {
+ }
+
+ Wrapper(const cl_type &obj, bool retainObject) :
+ object_(obj),
+ referenceCountable_(false)
+ {
+ referenceCountable_ = isReferenceCountable(obj);
+
+ if (retainObject) {
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+ }
+
+ ~Wrapper()
+ {
+ release();
+ }
+
+ Wrapper(const Wrapper<cl_type>& rhs)
+ {
+ object_ = rhs.object_;
+ referenceCountable_ = isReferenceCountable(object_);
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+
+ Wrapper(Wrapper<cl_type>&& rhs) CL_HPP_NOEXCEPT_
+ {
+ object_ = rhs.object_;
+ referenceCountable_ = rhs.referenceCountable_;
+ rhs.object_ = NULL;
+ rhs.referenceCountable_ = false;
+ }
+
+ Wrapper<cl_type>& operator = (const Wrapper<cl_type>& rhs)
+ {
+ if (this != &rhs) {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs.object_;
+ referenceCountable_ = rhs.referenceCountable_;
+ detail::errHandler(retain(), __RETAIN_ERR);
+ }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (Wrapper<cl_type>&& rhs)
+ {
+ if (this != &rhs) {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs.object_;
+ referenceCountable_ = rhs.referenceCountable_;
+ rhs.object_ = NULL;
+ rhs.referenceCountable_ = false;
+ }
+ return *this;
+ }
+
+ Wrapper<cl_type>& operator = (const cl_type &rhs)
+ {
+ detail::errHandler(release(), __RELEASE_ERR);
+ object_ = rhs;
+ referenceCountable_ = isReferenceCountable(object_);
+ return *this;
+ }
+
+ const cl_type& operator ()() const { return object_; }
+
+ cl_type& operator ()() { return object_; }
+
+ const cl_type get() const { return object_; }
+
+ cl_type get() { return object_; }
+
+protected:
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+ template<typename Func, typename U>
+ friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
+
+ cl_int retain() const
+ {
+ if( object_ != nullptr && referenceCountable_ ) {
+ return ReferenceHandler<cl_type>::retain(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+
+ cl_int release() const
+ {
+ if (object_ != nullptr && referenceCountable_) {
+ return ReferenceHandler<cl_type>::release(object_);
+ }
+ else {
+ return CL_SUCCESS;
+ }
+ }
+};
+
+template <typename T>
+inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
+{
+ return lhs() == rhs();
+}
+
+template <typename T>
+inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
+{
+ return !operator==(lhs, rhs);
+}
+
+} // namespace detail
+//! \endcond
+
+
+using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+/**
+* Exception class for build errors to carry build info
+*/
+class BuildError : public Error
+{
+private:
+ BuildLogType buildLogs;
+public:
+ BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
+ {
+ }
+
+ BuildLogType getBuildLog() const
+ {
+ return buildLogs;
+ }
+};
+namespace detail {
+ static inline cl_int buildErrHandler(
+ cl_int err,
+ const char * errStr,
+ const BuildLogType &buildLogs)
+ {
+ if (err != CL_SUCCESS) {
+ throw BuildError(err, errStr, buildLogs);
+ }
+ return err;
+ }
+} // namespace detail
+
+#else
+namespace detail {
+ static inline cl_int buildErrHandler(
+ cl_int err,
+ const char * errStr,
+ const BuildLogType &buildLogs)
+ {
+ (void)buildLogs; // suppress unused variable warning
+ (void)errStr;
+ return err;
+ }
+} // namespace detail
+#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
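+
+/* Illustrative sketch (assumes CL_HPP_ENABLE_EXCEPTIONS is defined and a
+ * cl::Program `program`, a class defined later in this header, is in scope):
+ *
+ *   try {
+ *       program.build("-cl-std=CL1.2");
+ *   } catch (const cl::BuildError &e) {
+ *       for (auto &log : e.getBuildLog()) {
+ *           std::cerr << log.second << std::endl;  // per-device build log
+ *       }
+ *   }
+ */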
+
+
+/*! \struct ImageFormat
+ * \brief Adds constructors and member functions for cl_image_format.
+ *
+ * \see cl_image_format
+ */
+struct ImageFormat : public cl_image_format
+{
+ //! \brief Default constructor - performs no initialization.
+ ImageFormat(){}
+
+ //! \brief Initializing constructor.
+ ImageFormat(cl_channel_order order, cl_channel_type type)
+ {
+ image_channel_order = order;
+ image_channel_data_type = type;
+ }
+
+ //! \brief Assignment operator.
+ ImageFormat& operator = (const ImageFormat& rhs)
+ {
+ if (this != &rhs) {
+ this->image_channel_data_type = rhs.image_channel_data_type;
+ this->image_channel_order = rhs.image_channel_order;
+ }
+ return *this;
+ }
+};
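+
+/* Illustrative usage: describing a conventional 8-bit RGBA image format.
+ *
+ *   cl::ImageFormat format(CL_RGBA, CL_UNORM_INT8);
+ *
+ * Because ImageFormat derives from cl_image_format it can be passed directly
+ * to the C API wherever a cl_image_format is expected.
+ */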
+
+/*! \brief Class interface for cl_device_id.
+ *
+ * \note Copies of these objects are inexpensive, since they don't 'own'
+ * any underlying resources or data structures.
+ *
+ * \see cl_device_id
+ */
+class Device : public detail::Wrapper<cl_device_id>
+{
+private:
+ static std::once_flag default_initialized_;
+ static Device default_;
+ static cl_int default_error_;
+
+ /*! \brief Create the default device.
+ *
+ * This sets @c default_ and @c default_error_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefault();
+
+ /*! \brief Create the default device from a provided device.
+ *
+ * This sets @c default_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefaultProvided(const Device &p) {
+ default_ = p;
+ }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+ /*! \brief Reset the default.
+ *
+ * This sets @c default_ to an empty value to support cleanup in
+ * the unit test framework.
+ * This function is not thread safe.
+ */
+ static void unitTestClearDefault() {
+ default_ = Device();
+ }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+ //! \brief Default constructor - initializes to NULL.
+ Device() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Constructor from cl_device_id.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ explicit Device(const cl_device_id &device, bool retainObject = false) :
+ detail::Wrapper<cl_type>(device, retainObject) { }
+
+ /*! \brief Returns the first device on the default context.
+ *
+ * \see Context::getDefault()
+ */
+ static Device getDefault(
+ cl_int *errResult = NULL)
+ {
+ std::call_once(default_initialized_, makeDefault);
+ detail::errHandler(default_error_);
+ if (errResult != NULL) {
+ *errResult = default_error_;
+ }
+ return default_;
+ }
+
+ /**
+ * Modify the default device to be used by
+ * subsequent operations.
+ * Only takes effect if no default was previously created.
+ * @return The updated default device. Compare it with the value passed
+ * in to verify that the update actually took effect.
+ */
+ static Device setDefault(const Device &default_device)
+ {
+ std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
+ detail::errHandler(default_error_);
+ return default_;
+ }
+
+ /*! \brief Assignment operator from cl_device_id.
+ *
+ * This simply copies the device ID value, which is an inexpensive operation.
+ */
+ Device& operator = (const cl_device_id& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Device& operator = (const Device &dev)
+ {
+ detail::Wrapper<cl_type>::operator=(dev);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(dev)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Device& operator = (Device &&dev)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(dev));
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetDeviceInfo().
+ template <typename T>
+ cl_int getInfo(cl_device_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetDeviceInfo, object_, name, param),
+ __GET_DEVICE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetDeviceInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_device_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_device_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /**
+ * CL 1.2 version
+ */
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ //! \brief Wrapper for clCreateSubDevices().
+ cl_int createSubDevices(
+ const cl_device_partition_property * properties,
+ vector<Device>* devices)
+ {
+ cl_uint n = 0;
+ cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
+ }
+
+ vector<cl_device_id> ids(n);
+ err = clCreateSubDevices(object_, properties, n, ids.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
+ }
+
+ // Cannot trivially assign because we need to capture intermediates
+ // with safe construction
+ if (devices) {
+ devices->resize(ids.size());
+
+ // Assign to the output vector, wrapping each raw ID in a Device
+ for (size_type i = 0; i < ids.size(); i++) {
+ // We do not need to retain because this device is being created
+ // by the runtime
+ (*devices)[i] = Device(ids[i], false);
+ }
+ }
+
+ return CL_SUCCESS;
+ }
+#elif defined(CL_HPP_USE_CL_DEVICE_FISSION)
+
+/**
+ * CL 1.1 version that uses device fission extension.
+ */
+ cl_int createSubDevices(
+ const cl_device_partition_property_ext * properties,
+ vector<Device>* devices)
+ {
+ typedef CL_API_ENTRY cl_int
+ ( CL_API_CALL * PFN_clCreateSubDevicesEXT)(
+ cl_device_id /*in_device*/,
+ const cl_device_partition_property_ext * /* properties */,
+ cl_uint /*num_entries*/,
+ cl_device_id * /*out_devices*/,
+ cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL;
+ CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT);
+
+ cl_uint n = 0;
+ cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
+ }
+
+ vector<cl_device_id> ids(n);
+ err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR);
+ }
+ // Cannot trivially assign because we need to capture intermediates
+ // with safe construction
+ if (devices) {
+ devices->resize(ids.size());
+
+ // Assign to the output vector, wrapping each raw ID in a Device
+ for (size_type i = 0; i < ids.size(); i++) {
+ // We do not need to retain because this device is being created
+ // by the runtime
+ (*devices)[i] = Device(ids[i], false);
+ }
+ }
+ return CL_SUCCESS;
+ }
+#endif // defined(CL_HPP_USE_CL_DEVICE_FISSION)
+};
+
+CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_;
+CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_;
+CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS;
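+
+/* Illustrative usage sketch (assumes an OpenCL runtime with at least one
+ * device is installed):
+ *
+ *   cl_int err = CL_SUCCESS;
+ *   cl::Device dev = cl::Device::getDefault(&err);
+ *   if (err == CL_SUCCESS) {
+ *       cl::string name = dev.getInfo<CL_DEVICE_NAME>();
+ *       cl_ulong mem    = dev.getInfo<CL_DEVICE_GLOBAL_MEM_SIZE>();
+ *   }
+ */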
+
+/*! \brief Class interface for cl_platform_id.
+ *
+ * \note Copies of these objects are inexpensive, since they don't 'own'
+ * any underlying resources or data structures.
+ *
+ * \see cl_platform_id
+ */
+class Platform : public detail::Wrapper<cl_platform_id>
+{
+private:
+ static std::once_flag default_initialized_;
+ static Platform default_;
+ static cl_int default_error_;
+
+ /*! \brief Create the default platform.
+ *
+ * This sets @c default_ and @c default_error_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefault() {
+ /* Throwing an exception from a call_once invocation does not do
+ * what we wish, so we catch it and save the error.
+ */
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ try
+#endif
+ {
+ // If no default platform was provided, generate one from the
+ // first platform reported by the runtime
+ cl_uint n = 0;
+
+ cl_int err = ::clGetPlatformIDs(0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ default_error_ = err;
+ return;
+ }
+ if (n == 0) {
+ default_error_ = CL_INVALID_PLATFORM;
+ return;
+ }
+
+ vector<cl_platform_id> ids(n);
+ err = ::clGetPlatformIDs(n, ids.data(), NULL);
+ if (err != CL_SUCCESS) {
+ default_error_ = err;
+ return;
+ }
+
+ default_ = Platform(ids[0]);
+ }
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ catch (cl::Error &e) {
+ default_error_ = e.err();
+ }
+#endif
+ }
+
+ /*! \brief Create the default platform from a provided platform.
+ *
+ * This sets @c default_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefaultProvided(const Platform &p) {
+ default_ = p;
+ }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+ /*! \brief Reset the default.
+ *
+ * This sets @c default_ to an empty value to support cleanup in
+ * the unit test framework.
+ * This function is not thread safe.
+ */
+ static void unitTestClearDefault() {
+ default_ = Platform();
+ }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+ //! \brief Default constructor - initializes to NULL.
+ Platform() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Constructor from cl_platform_id.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ explicit Platform(const cl_platform_id &platform, bool retainObject = false) :
+ detail::Wrapper<cl_type>(platform, retainObject) { }
+
+ /*! \brief Assignment operator from cl_platform_id.
+ *
+ * This simply copies the platform ID value, which is an inexpensive operation.
+ */
+ Platform& operator = (const cl_platform_id& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ static Platform getDefault(
+ cl_int *errResult = NULL)
+ {
+ std::call_once(default_initialized_, makeDefault);
+ detail::errHandler(default_error_);
+ if (errResult != NULL) {
+ *errResult = default_error_;
+ }
+ return default_;
+ }
+
+ /**
+ * Modify the default platform to be used by
+ * subsequent operations.
+ * Only takes effect if no default was previously created.
+ * @return The updated default platform. Compare it with the value passed
+ * in to verify that the update actually took effect.
+ */
+ static Platform setDefault(const Platform &default_platform)
+ {
+ std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform));
+ detail::errHandler(default_error_);
+ return default_;
+ }
+
+ //! \brief Wrapper for clGetPlatformInfo().
+ cl_int getInfo(cl_platform_info name, string* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetPlatformInfo, object_, name, param),
+ __GET_PLATFORM_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetPlatformInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_platform_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_platform_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Gets a list of devices for this platform.
+ *
+ * Wraps clGetDeviceIDs().
+ */
+ cl_int getDevices(
+ cl_device_type type,
+ vector<Device>* devices) const
+ {
+ cl_uint n = 0;
+ if( devices == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
+ }
+ cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ vector<cl_device_id> ids(n);
+ err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ // Cannot trivially assign because we need to capture intermediates
+ // with safe construction
+ // We must retain things we obtain from the API to avoid releasing
+ // API-owned objects.
+ if (devices) {
+ devices->resize(ids.size());
+
+ // Assign to param, constructing with retain behaviour
+ // to correctly capture each underlying CL object
+ for (size_type i = 0; i < ids.size(); i++) {
+ (*devices)[i] = Device(ids[i], true);
+ }
+ }
+ return CL_SUCCESS;
+ }
+
+#if defined(CL_HPP_USE_DX_INTEROP)
+ /*! \brief Get the list of available D3D10 devices.
+ *
+ * \param d3d_device_source specifies how \a d3d_object should be interpreted.
+ *
+ * \param d3d_object the Direct3D 10 object from which to derive the devices.
+ *
+ * \param d3d_device_set the set of devices to query.
+ *
+ * \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device
+ * values returned in devices can be used to identify a specific OpenCL
+ * device. If \a devices is NULL, CL_INVALID_ARG_VALUE is returned.
+ *
+ * \return One of the following values:
+ * - CL_SUCCESS if the function is executed successfully.
+ *
+ * The application can query specific capabilities of the OpenCL device(s)
+ * returned by cl::getDevices. This can be used by the application to
+ * determine which device(s) to use.
+ *
+ * \note In the case that exceptions are enabled and a return value
+ * other than CL_SUCCESS is generated, then cl::Error exception is
+ * generated.
+ */
+ cl_int getDevices(
+ cl_d3d10_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d10_device_set_khr d3d_device_set,
+ vector<Device>* devices) const
+ {
+ typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)(
+ cl_platform_id platform,
+ cl_d3d10_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d10_device_set_khr d3d_device_set,
+ cl_uint num_entries,
+ cl_device_id * devices,
+ cl_uint* num_devices);
+
+ if( devices == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR);
+ }
+
+ static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL;
+ CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR);
+
+ cl_uint n = 0;
+ cl_int err = pfn_clGetDeviceIDsFromD3D10KHR(
+ object_,
+ d3d_device_source,
+ d3d_object,
+ d3d_device_set,
+ 0,
+ NULL,
+ &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ vector<cl_device_id> ids(n);
+ err = pfn_clGetDeviceIDsFromD3D10KHR(
+ object_,
+ d3d_device_source,
+ d3d_object,
+ d3d_device_set,
+ n,
+ ids.data(),
+ NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_DEVICE_IDS_ERR);
+ }
+
+ // Cannot trivially assign because we need to capture intermediates
+ // with safe construction
+ // We must retain things we obtain from the API to avoid releasing
+ // API-owned objects.
+ if (devices) {
+ devices->resize(ids.size());
+
+ // Assign to param, constructing with retain behaviour
+ // to correctly capture each underlying CL object
+ for (size_type i = 0; i < ids.size(); i++) {
+ (*devices)[i] = Device(ids[i], true);
+ }
+ }
+ return CL_SUCCESS;
+ }
+#endif
+
+ /*! \brief Gets a list of available platforms.
+ *
+ * Wraps clGetPlatformIDs().
+ */
+ static cl_int get(
+ vector<Platform>* platforms)
+ {
+ cl_uint n = 0;
+
+ if( platforms == NULL ) {
+ return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR);
+ }
+
+ cl_int err = ::clGetPlatformIDs(0, NULL, &n);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ vector<cl_platform_id> ids(n);
+ err = ::clGetPlatformIDs(n, ids.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_PLATFORM_IDS_ERR);
+ }
+
+ if (platforms) {
+ platforms->resize(ids.size());
+
+ // Platforms don't reference count
+ for (size_type i = 0; i < ids.size(); i++) {
+ (*platforms)[i] = Platform(ids[i]);
+ }
+ }
+ return CL_SUCCESS;
+ }
+
+ /*! \brief Gets the first available platform.
+ *
+ * Wraps clGetPlatformIDs(), returning the first result.
+ */
+ static cl_int get(
+ Platform * platform)
+ {
+ cl_int err;
+ Platform default_platform = Platform::getDefault(&err);
+ if (platform) {
+ *platform = default_platform;
+ }
+ return err;
+ }
+
+ /*! \brief Gets the first available platform, returning it by value.
+ *
+ * \return Returns a valid platform if one is available.
+ * If no platform is available will return a null platform.
+ * Throws an exception if no platforms are available
+ * or an error condition occurs.
+ * Wraps clGetPlatformIDs(), returning the first result.
+ */
+ static Platform get(
+ cl_int * errResult = NULL)
+ {
+ cl_int err;
+ Platform default_platform = Platform::getDefault(&err);
+ if (errResult) {
+ *errResult = err;
+ }
+ return default_platform;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ //! \brief Wrapper for clUnloadPlatformCompiler().
+ cl_int
+ unloadCompiler()
+ {
+ return ::clUnloadPlatformCompiler(object_);
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+}; // class Platform
+
+CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_;
+CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_;
+CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS;
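+
+/* Illustrative usage sketch: enumerating every platform and reading its
+ * version string (assumes at least one OpenCL platform is installed):
+ *
+ *   cl::vector<cl::Platform> platforms;
+ *   cl::Platform::get(&platforms);
+ *   for (auto &p : platforms) {
+ *       cl::string version = p.getInfo<CL_PLATFORM_VERSION>();
+ *   }
+ */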
+
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+/**
+ * Unload the OpenCL compiler.
+ * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead.
+ */
+inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int
+UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+inline cl_int
+UnloadCompiler()
+{
+ return ::clUnloadCompiler();
+}
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+/*! \brief Class interface for cl_context.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_context as the original. For details, see
+ * clRetainContext() and clReleaseContext().
+ *
+ * \see cl_context
+ */
+class Context
+ : public detail::Wrapper<cl_context>
+{
+private:
+ static std::once_flag default_initialized_;
+ static Context default_;
+ static cl_int default_error_;
+
+ /*! \brief Create the default context from the default device type in the default platform.
+ *
+ * This sets @c default_ and @c default_error_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefault() {
+ /* Throwing an exception from a call_once invocation does not do
+ * what we wish, so we catch it and save the error.
+ */
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ try
+#endif
+ {
+#if !defined(__APPLE__) && !defined(__MACOS)
+ const Platform &p = Platform::getDefault();
+ cl_platform_id defaultPlatform = p();
+ cl_context_properties properties[3] = {
+ CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0
+ };
+#else // #if !defined(__APPLE__) && !defined(__MACOS)
+ cl_context_properties *properties = nullptr;
+#endif // #if !defined(__APPLE__) && !defined(__MACOS)
+
+ default_ = Context(
+ CL_DEVICE_TYPE_DEFAULT,
+ properties,
+ NULL,
+ NULL,
+ &default_error_);
+ }
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ catch (cl::Error &e) {
+ default_error_ = e.err();
+ }
+#endif
+ }
+
+
+ /*! \brief Create the default context from a provided Context.
+ *
+ * This sets @c default_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefaultProvided(const Context &c) {
+ default_ = c;
+ }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+ /*! \brief Reset the default.
+ *
+ * This sets @c default_ to an empty value to support cleanup in
+ * the unit test framework.
+ * This function is not thread safe.
+ */
+ static void unitTestClearDefault() {
+ default_ = Context();
+ }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+ /*! \brief Constructs a context including a list of specified devices.
+ *
+ * Wraps clCreateContext().
+ */
+ Context(
+ const vector<Device>& devices,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ size_type,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ size_type numDevices = devices.size();
+ vector<cl_device_id> deviceIDs(numDevices);
+
+ for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ object_ = ::clCreateContext(
+ properties, (cl_uint) numDevices,
+ deviceIDs.data(),
+ notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Context(
+ const Device& device,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ size_type,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ cl_device_id deviceID = device();
+
+ object_ = ::clCreateContext(
+ properties, 1,
+ &deviceID,
+ notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Constructs a context including all or a subset of devices of a specified type.
+ *
+ * Wraps clCreateContextFromType().
+ */
+ Context(
+ cl_device_type type,
+ cl_context_properties* properties = NULL,
+ void (CL_CALLBACK * notifyFptr)(
+ const char *,
+ const void *,
+ size_type,
+ void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+#if !defined(__APPLE__) && !defined(__MACOS)
+ cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 };
+
+ if (properties == NULL) {
+ // Get a valid platform ID as we cannot send in a blank one
+ vector<Platform> platforms;
+ error = Platform::get(&platforms);
+ if (error != CL_SUCCESS) {
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+ // Check the platforms we found for a device of our specified type
+ cl_context_properties platform_id = 0;
+ for (unsigned int i = 0; i < platforms.size(); i++) {
+
+ vector<Device> devices;
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ try {
+#endif
+
+ error = platforms[i].getDevices(type, &devices);
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ } catch (Error &) {}
+ // Swallow the exception here: we do not want to bail out just
+ // because the first platform has no devices of the requested type.
+ // Error checking follows below and can throw there if needed.
+#endif
+
+ // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND
+ if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) {
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ if (devices.size() > 0) {
+ platform_id = (cl_context_properties)platforms[i]();
+ break;
+ }
+ }
+
+ if (platform_id == 0) {
+ detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = CL_DEVICE_NOT_FOUND;
+ }
+ return;
+ }
+
+ prop[1] = platform_id;
+ properties = &prop[0];
+ }
+#endif
+ object_ = ::clCreateContextFromType(
+ properties, type, notifyFptr, data, &error);
+
+ detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Context(const Context& ctx) : detail::Wrapper<cl_type>(ctx) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Context& operator = (const Context &ctx)
+ {
+ detail::Wrapper<cl_type>::operator=(ctx);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(ctx)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Context& operator = (Context &&ctx)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(ctx));
+ return *this;
+ }
+
+
+ /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT.
+ *
+ * \note All calls to this function return the same cl_context as the first.
+ */
+ static Context getDefault(cl_int * err = NULL)
+ {
+ std::call_once(default_initialized_, makeDefault);
+ detail::errHandler(default_error_);
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ /**
+ * Modify the default context to be used by
+ * subsequent operations.
+ * Only takes effect if no default was previously created.
+ * @return The updated default context. Compare it with the value passed
+ * in to verify that the update actually took effect.
+ */
+ static Context setDefault(const Context &default_context)
+ {
+ std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context));
+ detail::errHandler(default_error_);
+ return default_;
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Context() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Constructor from cl_context - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the cl_context
+ * into the new Context object.
+ */
+ explicit Context(const cl_context& context, bool retainObject = false) :
+ detail::Wrapper<cl_type>(context, retainObject) { }
+
+ /*! \brief Assignment operator from cl_context - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseContext() on the value previously held by this instance.
+ */
+ Context& operator = (const cl_context& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetContextInfo().
+ template <typename T>
+ cl_int getInfo(cl_context_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetContextInfo, object_, name, param),
+ __GET_CONTEXT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetContextInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_context_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_context_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Gets a list of supported image formats.
+ *
+ * Wraps clGetSupportedImageFormats().
+ */
+ cl_int getSupportedImageFormats(
+ cl_mem_flags flags,
+ cl_mem_object_type type,
+ vector<ImageFormat>* formats) const
+ {
+ cl_uint numEntries;
+
+ if (!formats) {
+ return CL_SUCCESS;
+ }
+
+ cl_int err = ::clGetSupportedImageFormats(
+ object_,
+ flags,
+ type,
+ 0,
+ NULL,
+ &numEntries);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
+ }
+
+ if (numEntries > 0) {
+ vector<ImageFormat> value(numEntries);
+ err = ::clGetSupportedImageFormats(
+ object_,
+ flags,
+ type,
+ numEntries,
+ (cl_image_format*)value.data(),
+ NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR);
+ }
+
+ formats->assign(begin(value), end(value));
+ }
+ else {
+ // If no values are being returned, ensure an empty vector comes back
+ formats->clear();
+ }
+
+ return CL_SUCCESS;
+ }
+};
+
+inline void Device::makeDefault()
+{
+ /* Throwing an exception from a call_once invocation does not do
+ * what we wish, so we catch it and save the error.
+ */
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ try
+#endif
+ {
+ cl_int error = 0;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+
+ if (error != CL_SUCCESS) {
+ default_error_ = error;
+ }
+ else {
+ default_ = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+ default_error_ = CL_SUCCESS;
+ }
+ }
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ catch (cl::Error &e) {
+ default_error_ = e.err();
+ }
+#endif
+}
+
+CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_;
+CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_;
+CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS;
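+
+/* Illustrative usage sketch: creating a context for all default-type devices
+ * on the default platform, then listing its devices:
+ *
+ *   cl_int err = CL_SUCCESS;
+ *   cl::Context context(CL_DEVICE_TYPE_DEFAULT, NULL, NULL, NULL, &err);
+ *   if (err == CL_SUCCESS) {
+ *       cl::vector<cl::Device> devices =
+ *           context.getInfo<CL_CONTEXT_DEVICES>();
+ *   }
+ */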
+
+/*! \brief Class interface for cl_event.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_event as the original. For details, see
+ * clRetainEvent() and clReleaseEvent().
+ *
+ * \see cl_event
+ */
+class Event : public detail::Wrapper<cl_event>
+{
+public:
+ //! \brief Default constructor - initializes to NULL.
+ Event() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Constructor from cl_event - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * This effectively transfers ownership of a refcount on the cl_event
+ * into the new Event object.
+ */
+ explicit Event(const cl_event& event, bool retainObject = false) :
+ detail::Wrapper<cl_type>(event, retainObject) { }
+
+ /*! \brief Assignment operator from cl_event - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseEvent() on the value previously held by this instance.
+ */
+ Event& operator = (const cl_event& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetEventInfo().
+ template <typename T>
+ cl_int getInfo(cl_event_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetEventInfo, object_, name, param),
+ __GET_EVENT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetEventInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_event_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_event_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ //! \brief Wrapper for clGetEventProfilingInfo().
+ template <typename T>
+ cl_int getProfilingInfo(cl_profiling_info name, T* param) const
+ {
+ return detail::errHandler(detail::getInfo(
+ &::clGetEventProfilingInfo, object_, name, param),
+ __GET_EVENT_PROFILE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetEventProfilingInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_profiling_info, name>::param_type
+ getProfilingInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_profiling_info, name>::param_type param;
+ cl_int result = getProfilingInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*! \brief Blocks the calling thread until this event completes.
+ *
+ * Wraps clWaitForEvents().
+ */
+ cl_int wait() const
+ {
+ return detail::errHandler(
+ ::clWaitForEvents(1, &object_),
+ __WAIT_FOR_EVENTS_ERR);
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+ /*! \brief Registers a user callback function for a specific command execution status.
+ *
+ * Wraps clSetEventCallback().
+ */
+ cl_int setCallback(
+ cl_int type,
+ void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *),
+ void * user_data = NULL)
+ {
+ return detail::errHandler(
+ ::clSetEventCallback(
+ object_,
+ type,
+ pfn_notify,
+ user_data),
+ __SET_EVENT_CALLBACK_ERR);
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+
+ /*! \brief Blocks the calling thread until every event specified is complete.
+ *
+ * Wraps clWaitForEvents().
+ */
+ static cl_int
+ waitForEvents(const vector<Event>& events)
+ {
+ return detail::errHandler(
+ ::clWaitForEvents(
+ (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
+ __WAIT_FOR_EVENTS_ERR);
+ }
+};
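+
+/* Illustrative profiling sketch (assumes `event` came from a command queue
+ * created with CL_QUEUE_PROFILING_ENABLE and the command has completed):
+ *
+ *   event.wait();
+ *   cl_ulong start = event.getProfilingInfo<CL_PROFILING_COMMAND_START>();
+ *   cl_ulong end   = event.getProfilingInfo<CL_PROFILING_COMMAND_END>();
+ *   cl_ulong elapsedNs = end - start;  // device timestamps in nanoseconds
+ */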
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+/*! \brief Class interface for user events (a subset of cl_event's).
+ *
+ * See Event for details about copy semantics, etc.
+ */
+class UserEvent : public Event
+{
+public:
+ /*! \brief Constructs a user event on a given context.
+ *
+ * Wraps clCreateUserEvent().
+ */
+ UserEvent(
+ const Context& context,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateUserEvent(
+ context(),
+ &error);
+
+ detail::errHandler(error, __CREATE_USER_EVENT_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ UserEvent() : Event() { }
+
+ /*! \brief Sets the execution status of a user event object.
+ *
+ * Wraps clSetUserEventStatus().
+ */
+ cl_int setStatus(cl_int status)
+ {
+ return detail::errHandler(
+ ::clSetUserEventStatus(object_,status),
+ __SET_USER_EVENT_STATUS_ERR);
+ }
+};
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
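+
+/* Illustrative sketch: a user event lets the host gate device-side work.
+ * Commands that list `gate` in their wait list will not run until the host
+ * fires it:
+ *
+ *   cl::UserEvent gate(context);
+ *   // ... enqueue commands with `gate` in their event wait lists ...
+ *   gate.setStatus(CL_COMPLETE);  // releases the gated commands
+ */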
+
+/*! \brief Blocks the calling thread until every event specified is complete.
+ *
+ * Wraps clWaitForEvents().
+ */
+inline static cl_int
+WaitForEvents(const vector<Event>& events)
+{
+ return detail::errHandler(
+ ::clWaitForEvents(
+ (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL),
+ __WAIT_FOR_EVENTS_ERR);
+}
+
+/*! \brief Class interface for cl_mem.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_mem as the original. For details, see
+ * clRetainMemObject() and clReleaseMemObject().
+ *
+ * \see cl_mem
+ */
+class Memory : public detail::Wrapper<cl_mem>
+{
+public:
+ //! \brief Default constructor - initializes to NULL.
+ Memory() : detail::Wrapper<cl_type>() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * Optionally transfer ownership of a refcount on the cl_mem
+ * into the new Memory object.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ *
+ * See Memory for further details.
+ */
+ explicit Memory(const cl_mem& memory, bool retainObject) :
+ detail::Wrapper<cl_type>(memory, retainObject) { }
+
+ /*! \brief Assignment operator from cl_mem - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseMemObject() on the value previously held by this instance.
+ */
+ Memory& operator = (const cl_mem& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Memory(const Memory& mem) : detail::Wrapper<cl_type>(mem) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Memory& operator = (const Memory &mem)
+ {
+ detail::Wrapper<cl_type>::operator=(mem);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(mem)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Memory& operator = (Memory &&mem)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(mem));
+ return *this;
+ }
+
+
+ //! \brief Wrapper for clGetMemObjectInfo().
+ template <typename T>
+ cl_int getInfo(cl_mem_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetMemObjectInfo, object_, name, param),
+ __GET_MEM_OBJECT_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_mem_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_mem_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+ /*! \brief Registers a callback function to be called when the memory object
+ * is no longer needed.
+ *
+ * Wraps clSetMemObjectDestructorCallback().
+ *
+ * Repeated calls to this function, for a given cl_mem value, will append
+ * to the list of functions called (in reverse order) when memory object's
+ * resources are freed and the memory object is deleted.
+ *
+ * \note
+ * The registered callbacks are associated with the underlying cl_mem
+ * value - not the Memory class instance.
+ */
+ cl_int setDestructorCallback(
+ void (CL_CALLBACK * pfn_notify)(cl_mem, void *),
+ void * user_data = NULL)
+ {
+ return detail::errHandler(
+ ::clSetMemObjectDestructorCallback(
+ object_,
+ pfn_notify,
+ user_data),
+ __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR);
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+
+};
+
+// Pre-declare copy functions
+class Buffer;
+template< typename IteratorType >
+cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
+template< typename IteratorType >
+cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
+template< typename IteratorType >
+cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer );
+template< typename IteratorType >
+cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator );
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+namespace detail
+{
+ class SVMTraitNull
+ {
+ public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return 0;
+ }
+ };
+} // namespace detail
+
+template<class Trait = detail::SVMTraitNull>
+class SVMTraitReadWrite
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return CL_MEM_READ_WRITE |
+ Trait::getSVMMemFlags();
+ }
+};
+
+template<class Trait = detail::SVMTraitNull>
+class SVMTraitReadOnly
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return CL_MEM_READ_ONLY |
+ Trait::getSVMMemFlags();
+ }
+};
+
+template<class Trait = detail::SVMTraitNull>
+class SVMTraitWriteOnly
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return CL_MEM_WRITE_ONLY |
+ Trait::getSVMMemFlags();
+ }
+};
+
+template<class Trait = SVMTraitReadWrite<>>
+class SVMTraitCoarse
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return Trait::getSVMMemFlags();
+ }
+};
+
+template<class Trait = SVMTraitReadWrite<>>
+class SVMTraitFine
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return CL_MEM_SVM_FINE_GRAIN_BUFFER |
+ Trait::getSVMMemFlags();
+ }
+};
+
+template<class Trait = SVMTraitReadWrite<>>
+class SVMTraitAtomic
+{
+public:
+ static cl_svm_mem_flags getSVMMemFlags()
+ {
+ return
+ CL_MEM_SVM_FINE_GRAIN_BUFFER |
+ CL_MEM_SVM_ATOMICS |
+ Trait::getSVMMemFlags();
+ }
+};
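+
+/* The traits compose: an outer trait ORs its flags with those of the nested
+ * trait. For example (illustrative only), a coarse-grained read-only
+ * allocation can be described as:
+ * \code
+ * typedef cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>> ReadOnlyCoarseTrait;
+ * // ReadOnlyCoarseTrait::getSVMMemFlags() yields CL_MEM_READ_ONLY
+ * \endcode
+ */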
+
+// Pre-declare SVM map function
+template<typename T>
+inline cl_int enqueueMapSVM(
+ T* ptr,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL);
+
+/**
+ * STL-like allocator class for managing SVM objects, provided for convenience.
+ *
+ * While this behaves like an allocator for the purposes of constructing vectors
+ * and similar objects, care must be taken when combining it with smart pointers.
+ * The allocator should not be used to construct a unique_ptr directly in
+ * coarse-grained SVM mode, because coarse-grained memory management does not
+ * interact correctly with reference counting.
+ *
+ * Instead, a Deleter is provided which may be used with unique_ptr and is used
+ * by the allocate_pointer and allocate_svm operations supplied below.
+ */
+template<typename T, class SVMTrait>
+class SVMAllocator {
+private:
+ Context context_;
+
+public:
+ typedef T value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+
+ template<typename U>
+ struct rebind
+ {
+ typedef SVMAllocator<U, SVMTrait> other;
+ };
+
+ template<typename U, typename V>
+ friend class SVMAllocator;
+
+ SVMAllocator() :
+ context_(Context::getDefault())
+ {
+ }
+
+ explicit SVMAllocator(cl::Context context) :
+ context_(context)
+ {
+ }
+
+
+ SVMAllocator(const SVMAllocator &other) :
+ context_(other.context_)
+ {
+ }
+
+ template<typename U>
+ SVMAllocator(const SVMAllocator<U, SVMTrait> &other) :
+ context_(other.context_)
+ {
+ }
+
+ ~SVMAllocator()
+ {
+ }
+
+ pointer address(reference r) CL_HPP_NOEXCEPT_
+ {
+ return std::addressof(r);
+ }
+
+ const_pointer address(const_reference r) CL_HPP_NOEXCEPT_
+ {
+ return std::addressof(r);
+ }
+
+    /**
+     * Allocate an SVM pointer.
+     *
+     * If the allocator is coarse-grained, the allocation is also mapped for
+     * host access so that containers can correctly construct data in place.
+     */
+ pointer allocate(
+ size_type size,
+ typename cl::SVMAllocator<void, SVMTrait>::const_pointer = 0)
+ {
+ // Allocate memory with default alignment matching the size of the type
+ void* voidPointer =
+ clSVMAlloc(
+ context_(),
+ SVMTrait::getSVMMemFlags(),
+ size*sizeof(T),
+ sizeof(T));
+ pointer retValue = reinterpret_cast<pointer>(
+ voidPointer);
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ if (!retValue) {
+ std::bad_alloc excep;
+ throw excep;
+ }
+#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
+
+ // If allocation was coarse-grained then map it
+ if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) {
+ cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T));
+ if (err != CL_SUCCESS) {
+ std::bad_alloc excep;
+ throw excep;
+ }
+ }
+
+ // If exceptions disabled, return null pointer from allocator
+ return retValue;
+ }
+
+ void deallocate(pointer p, size_type)
+ {
+ clSVMFree(context_(), p);
+ }
+
+ /**
+ * Return the maximum possible allocation size.
+ * This is the minimum of the maximum sizes of all devices in the context.
+ */
+ size_type max_size() const CL_HPP_NOEXCEPT_
+ {
+ size_type maxSize = std::numeric_limits<size_type>::max() / sizeof(T);
+
+ for (Device &d : context_.getInfo<CL_CONTEXT_DEVICES>()) {
+ maxSize = std::min(
+ maxSize,
+ static_cast<size_type>(d.getInfo<CL_DEVICE_MAX_MEM_ALLOC_SIZE>()));
+ }
+
+ return maxSize;
+ }
+
+ template< class U, class... Args >
+ void construct(U* p, Args&&... args)
+ {
+        new(p)T(std::forward<Args>(args)...);
+ }
+
+ template< class U >
+ void destroy(U* p)
+ {
+ p->~U();
+ }
+
+ /**
+ * Returns true if the contexts match.
+ */
+ inline bool operator==(SVMAllocator const& rhs)
+ {
+ return (context_==rhs.context_);
+ }
+
+ inline bool operator!=(SVMAllocator const& a)
+ {
+ return !operator==(a);
+ }
+}; // class SVMAllocator
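+
+/* Usage sketch (illustrative only; assumes a valid default context): a
+ * std::vector whose storage is coarse-grained SVM.
+ * \code
+ * cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
+ * std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> v(1024, 0, svmAlloc);
+ * // v.data() now points into SVM and can be handed to a kernel, e.g. via
+ * // the SVM-aware Kernel::setArg overloads defined later in this header.
+ * \endcode
+ */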
+
+
+template<class SVMTrait>
+class SVMAllocator<void, SVMTrait> {
+public:
+ typedef void value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+
+ template<typename U>
+ struct rebind
+ {
+ typedef SVMAllocator<U, SVMTrait> other;
+ };
+
+ template<typename U, typename V>
+ friend class SVMAllocator;
+};
+
+#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+namespace detail
+{
+ template<class Alloc>
+ class Deleter {
+ private:
+ Alloc alloc_;
+ size_type copies_;
+
+ public:
+ typedef typename std::allocator_traits<Alloc>::pointer pointer;
+
+ Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
+ {
+ }
+
+ void operator()(pointer ptr) const {
+ Alloc tmpAlloc{ alloc_ };
+ std::allocator_traits<Alloc>::destroy(tmpAlloc, std::addressof(*ptr));
+ std::allocator_traits<Alloc>::deallocate(tmpAlloc, ptr, copies_);
+ }
+ };
+} // namespace detail
+
+/**
+ * Allocation operation analogous to std::allocate_shared, but returning a
+ * cl::pointer (a unique_ptr with an SVM-aware deleter) rather than a
+ * shared_ptr. A unique_ptr is used so that no separate control block is
+ * allocated, which could otherwise land in memory inaccessible to the host.
+ */
+template <class T, class Alloc, class... Args>
+cl::pointer<T, detail::Deleter<Alloc>> allocate_pointer(const Alloc &alloc_, Args&&... args)
+{
+ Alloc alloc(alloc_);
+ static const size_t copies = 1;
+
+ // Ensure that creation of the management block and the
+ // object are dealt with separately such that we only provide a deleter
+
+ T* tmp = std::allocator_traits<Alloc>::allocate(alloc, copies);
+ if (!tmp) {
+ std::bad_alloc excep;
+ throw excep;
+ }
+ try {
+ std::allocator_traits<Alloc>::construct(
+ alloc,
+ std::addressof(*tmp),
+ std::forward<Args>(args)...);
+
+ return cl::pointer<T, detail::Deleter<Alloc>>(tmp, detail::Deleter<Alloc>{alloc, copies});
+ }
+    catch (std::bad_alloc &)
+ {
+ std::allocator_traits<Alloc>::deallocate(alloc, tmp, copies);
+ throw;
+ }
+}
+
+template< class T, class SVMTrait, class... Args >
+cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(Args... args)
+{
+ SVMAllocator<T, SVMTrait> alloc;
+ return cl::allocate_pointer<T>(alloc, args...);
+}
+
+template< class T, class SVMTrait, class... Args >
+cl::pointer<T, detail::Deleter<SVMAllocator<T, SVMTrait>>> allocate_svm(const cl::Context &c, Args... args)
+{
+ SVMAllocator<T, SVMTrait> alloc(c);
+ return cl::allocate_pointer<T>(alloc, args...);
+}
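+
+/* Usage sketch (illustrative only; assumes a valid default context):
+ * \code
+ * auto p = cl::allocate_svm<int, cl::SVMTraitCoarse<>>(42);
+ * *p += 1; // host access is valid here: coarse-grained allocations are
+ *          // mapped by the allocator at allocation time
+ * \endcode
+ */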
+#endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+
+/*! \brief Vector alias to simplify construction of coarse-grained SVM containers.
+ *
+ */
+template < class T >
+using coarse_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>>;
+
+/*! \brief Vector alias to simplify construction of fine-grained SVM containers.
+*
+*/
+template < class T >
+using fine_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitFine<>>>;
+
+/*! \brief Vector alias to simplify construction of fine-grained SVM containers that support platform atomics.
+*
+*/
+template < class T >
+using atomic_svm_vector = vector<T, cl::SVMAllocator<int, cl::SVMTraitAtomic<>>>;
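+
+/* For example (illustrative only), a coarse-grained SVM vector of floats:
+ * \code
+ * cl::coarse_svm_vector<float> data(1024, 0.0f);
+ * \endcode
+ */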
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+/*! \brief Class interface for Buffer Memory Objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Buffer : public Memory
+{
+public:
+
+ /*! \brief Constructs a Buffer in a specified context.
+ *
+ * Wraps clCreateBuffer().
+ *
+ * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
+ * specified. Note alignment & exclusivity requirements.
+ */
+ Buffer(
+ const Context& context,
+ cl_mem_flags flags,
+ size_type size,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
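+
+    /* Usage sketch (illustrative only): a device buffer initialized from
+     * host memory at creation time.
+     * \code
+     * std::vector<float> host(1024, 1.0f);
+     * cl::Buffer buf(
+     *     context,
+     *     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
+     *     host.size() * sizeof(float),
+     *     host.data());
+     * \endcode
+     */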
+
+ /*! \brief Constructs a Buffer in the default context.
+ *
+ * Wraps clCreateBuffer().
+ *
+ * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
+ * specified. Note alignment & exclusivity requirements.
+ *
+ * \see Context::getDefault()
+ */
+ Buffer(
+ cl_mem_flags flags,
+ size_type size,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(err);
+
+ object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*!
+ * \brief Construct a Buffer from a host container via iterators.
+ * IteratorType must be random access.
+     * If useHostPtr is specified, the iterators must represent contiguous data.
+ */
+ template< typename IteratorType >
+ Buffer(
+ IteratorType startIterator,
+ IteratorType endIterator,
+ bool readOnly,
+ bool useHostPtr = false,
+ cl_int* err = NULL)
+ {
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ cl_mem_flags flags = 0;
+ if( readOnly ) {
+ flags |= CL_MEM_READ_ONLY;
+ }
+ else {
+ flags |= CL_MEM_READ_WRITE;
+ }
+ if( useHostPtr ) {
+ flags |= CL_MEM_USE_HOST_PTR;
+ }
+
+ size_type size = sizeof(DataType)*(endIterator - startIterator);
+
+ Context context = Context::getDefault(err);
+
+ if( useHostPtr ) {
+ object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
+ } else {
+ object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
+ }
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ if( !useHostPtr ) {
+ error = cl::copy(startIterator, endIterator, *this);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ }
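+
+    /* Usage sketch (illustrative only; uses the default context and queue):
+     * \code
+     * std::vector<int> host = { 1, 2, 3, 4 };
+     * cl::Buffer buf(host.begin(), host.end(), true); // read-only; data copied
+     * \endcode
+     */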
+
+ /*!
+ * \brief Construct a Buffer from a host container via iterators using a specified context.
+ * IteratorType must be random access.
+     * If useHostPtr is specified, the iterators must represent contiguous data.
+ */
+ template< typename IteratorType >
+ Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator,
+ bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
+
+ /*!
+ * \brief Construct a Buffer from a host container via iterators using a specified queue.
+     * If useHostPtr is specified, the iterators must be random access.
+ */
+ template< typename IteratorType >
+ Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator,
+ bool readOnly, bool useHostPtr = false, cl_int* err = NULL);
+
+ //! \brief Default constructor - initializes to NULL.
+ Buffer() : Memory() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with earlier versions.
+ *
+ * See Memory for further details.
+ */
+ explicit Buffer(const cl_mem& buffer, bool retainObject = false) :
+ Memory(buffer, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Buffer& operator = (const cl_mem& rhs)
+ {
+ Memory::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Buffer(const Buffer& buf) : Memory(buf) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Buffer& operator = (const Buffer &buf)
+ {
+ Memory::operator=(buf);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Buffer& operator = (Buffer &&buf)
+ {
+ Memory::operator=(std::move(buf));
+ return *this;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+ /*! \brief Creates a new buffer object from this.
+ *
+ * Wraps clCreateSubBuffer().
+ */
+ Buffer createSubBuffer(
+ cl_mem_flags flags,
+ cl_buffer_create_type buffer_create_type,
+ const void * buffer_create_info,
+ cl_int * err = NULL)
+ {
+ Buffer result;
+ cl_int error;
+ result.object_ = ::clCreateSubBuffer(
+ object_,
+ flags,
+ buffer_create_type,
+ buffer_create_info,
+ &error);
+
+ detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ return result;
+ }
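+
+    /* Usage sketch (illustrative only): carve a 256-byte region out of an
+     * existing buffer. The origin must satisfy the device's
+     * CL_DEVICE_MEM_BASE_ADDR_ALIGN requirement.
+     * \code
+     * cl_buffer_region region = { 0, 256 }; // origin and size, in bytes
+     * cl::Buffer sub = buf.createSubBuffer(
+     *     CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, &region);
+     * \endcode
+     */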
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+};
+
+#if defined (CL_HPP_USE_DX_INTEROP)
+/*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer's.
+ *
+ * This is provided to facilitate interoperability with Direct3D.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferD3D10 : public Buffer
+{
+public:
+
+
+ /*! \brief Constructs a BufferD3D10, in a specified context, from a
+ * given ID3D10Buffer.
+ *
+ * Wraps clCreateFromD3D10BufferKHR().
+ */
+ BufferD3D10(
+ const Context& context,
+ cl_mem_flags flags,
+ ID3D10Buffer* bufobj,
+        cl_int * err = NULL)
+ {
+ typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
+ cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
+ cl_int* errcode_ret);
+        PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR = nullptr;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ vector<cl_context_properties> props = context.getInfo<CL_CONTEXT_PROPERTIES>();
+        cl_platform_id platform = nullptr;
+        for( size_type i = 0; i < props.size(); ++i ) {
+            if( props[i] == CL_CONTEXT_PLATFORM ) {
+                platform = reinterpret_cast<cl_platform_id>(props[i+1]);
+ }
+ }
+ CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
+ CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
+#endif
+
+ cl_int error;
+ object_ = pfn_clCreateFromD3D10BufferKHR(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferD3D10() : Buffer() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
+ Buffer(buffer, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferD3D10& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferD3D10(const BufferD3D10& buf) :
+ Buffer(buf) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferD3D10& operator = (const BufferD3D10 &buf)
+ {
+ Buffer::operator=(buf);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferD3D10& operator = (BufferD3D10 &&buf)
+ {
+ Buffer::operator=(std::move(buf));
+ return *this;
+ }
+};
+#endif
+
+/*! \brief Class interface for GL Buffer Memory Objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferGL : public Buffer
+{
+public:
+ /*! \brief Constructs a BufferGL in a specified context, from a given
+ * GL buffer.
+ *
+ * Wraps clCreateFromGLBuffer().
+ */
+ BufferGL(
+ const Context& context,
+ cl_mem_flags flags,
+ cl_GLuint bufobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLBuffer(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
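+
+    /* Usage sketch (illustrative only; vbo is a GL buffer object previously
+     * created through OpenGL, and the context was created with GL sharing
+     * properties):
+     * \code
+     * cl::BufferGL clVbo(context, CL_MEM_READ_WRITE, vbo);
+     * // The object must be acquired (clEnqueueAcquireGLObjects) before use
+     * // in kernels, and released again afterwards.
+     * \endcode
+     */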
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferGL() : Buffer() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit BufferGL(const cl_mem& buffer, bool retainObject = false) :
+ Buffer(buffer, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferGL& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferGL(const BufferGL& buf) : Buffer(buf) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferGL& operator = (const BufferGL &buf)
+ {
+ Buffer::operator=(buf);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferGL& operator = (BufferGL &&buf)
+ {
+ Buffer::operator=(std::move(buf));
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetGLObjectInfo().
+ cl_int getObjectInfo(
+ cl_gl_object_type *type,
+ cl_GLuint * gl_object_name)
+ {
+ return detail::errHandler(
+ ::clGetGLObjectInfo(object_,type,gl_object_name),
+ __GET_GL_OBJECT_INFO_ERR);
+ }
+};
+
+/*! \brief Class interface for GL Render Buffer Memory Objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferRenderGL : public Buffer
+{
+public:
+ /*! \brief Constructs a BufferRenderGL in a specified context, from a given
+ * GL Renderbuffer.
+ *
+ * Wraps clCreateFromGLRenderbuffer().
+ */
+ BufferRenderGL(
+ const Context& context,
+ cl_mem_flags flags,
+ cl_GLuint bufobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLRenderbuffer(
+ context(),
+ flags,
+ bufobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ BufferRenderGL() : Buffer() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) :
+ Buffer(buffer, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ BufferRenderGL& operator = (const cl_mem& rhs)
+ {
+ Buffer::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferRenderGL& operator = (const BufferRenderGL &buf)
+ {
+ Buffer::operator=(buf);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ BufferRenderGL& operator = (BufferRenderGL &&buf)
+ {
+ Buffer::operator=(std::move(buf));
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetGLObjectInfo().
+ cl_int getObjectInfo(
+ cl_gl_object_type *type,
+ cl_GLuint * gl_object_name)
+ {
+ return detail::errHandler(
+ ::clGetGLObjectInfo(object_,type,gl_object_name),
+ __GET_GL_OBJECT_INFO_ERR);
+ }
+};
+
+/*! \brief C++ base class for Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image : public Memory
+{
+protected:
+ //! \brief Default constructor - initializes to NULL.
+ Image() : Memory() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image(const cl_mem& image, bool retainObject = false) :
+ Memory(image, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image& operator = (const cl_mem& rhs)
+ {
+ Memory::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image(const Image& img) : Memory(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image& operator = (const Image &img)
+ {
+ Memory::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image& operator = (Image &&img)
+ {
+ Memory::operator=(std::move(img));
+ return *this;
+ }
+
+
+public:
+ //! \brief Wrapper for clGetImageInfo().
+ template <typename T>
+ cl_int getImageInfo(cl_image_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetImageInfo, object_, name, param),
+ __GET_IMAGE_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetImageInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_image_info, name>::param_type
+ getImageInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_image_info, name>::param_type param;
+ cl_int result = getImageInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+};
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+/*! \brief Class interface for 1D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image1D : public Image
+{
+public:
+ /*! \brief Constructs a 1D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image1D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type width,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D,
+ width,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image1D() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image1D(const cl_mem& image1D, bool retainObject = false) :
+ Image(image1D, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image1D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1D(const Image1D& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1D& operator = (const Image1D &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1D& operator = (Image1D &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+
+};
+
+/*! \class Image1DBuffer
+ * \brief Image interface for 1D buffer images.
+ */
+class Image1DBuffer : public Image
+{
+public:
+ Image1DBuffer(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type width,
+ const Buffer &buffer,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D_BUFFER,
+ width,
+ 0, 0, 0, 0, 0, 0, 0,
+ buffer()
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ NULL,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image1DBuffer() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) :
+ Image(image1D, retainObject) { }
+
+ Image1DBuffer& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DBuffer(const Image1DBuffer& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DBuffer& operator = (const Image1DBuffer &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DBuffer& operator = (Image1DBuffer &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+
+};
+
+/*! \class Image1DArray
+ * \brief Image interface for arrays of 1D images.
+ */
+class Image1DArray : public Image
+{
+public:
+ Image1DArray(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type arraySize,
+ size_type width,
+ size_type rowPitch,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE1D_ARRAY,
+ width,
+ 0, 0, // height, depth (unused)
+ arraySize,
+ rowPitch,
+ 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image1DArray() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) :
+ Image(imageArray, retainObject) { }
+
+
+ Image1DArray& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DArray(const Image1DArray& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DArray& operator = (const Image1DArray &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image1DArray& operator = (Image1DArray &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+
+};
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+
+/*! \brief Class interface for 2D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image2D : public Image
+{
+public:
+ /*! \brief Constructs a 2D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image2D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type width,
+ size_type height,
+ size_type row_pitch = 0,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ bool useCreateImage;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ // Run-time decision based on the actual platform
+ {
+ cl_uint version = detail::getContextPlatformVersion(context());
+ useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
+ }
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
+ useCreateImage = true;
+#else
+ useCreateImage = false;
+#endif
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ if (useCreateImage)
+ {
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D,
+ width,
+ height,
+ 0, 0, // depth, array size (unused)
+ row_pitch,
+ 0, 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ if (!useCreateImage)
+ {
+ object_ = ::clCreateImage2D(
+ context(), flags,&format, width, height, row_pitch, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_IMAGE2D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ }
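+
+    /* Usage sketch (illustrative only): a 640x480 RGBA8 image with no host
+     * pointer; a row_pitch of 0 means tightly packed rows.
+     * \code
+     * cl::ImageFormat format(CL_RGBA, CL_UNORM_INT8);
+     * cl::Image2D img(context, CL_MEM_READ_ONLY, format, 640, 480);
+     * \endcode
+     */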
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /*! \brief Constructs a 2D Image from a buffer.
+ * \note This will share storage with the underlying buffer.
+ *
+ * Wraps clCreateImage().
+ */
+ Image2D(
+ const Context& context,
+ ImageFormat format,
+ const Buffer &sourceBuffer,
+ size_type width,
+ size_type height,
+ size_type row_pitch = 0,
+ cl_int* err = nullptr)
+ {
+ cl_int error;
+
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D,
+ width,
+ height,
+ 0, 0, // depth, array size (unused)
+ row_pitch,
+ 0, 0, 0,
+ // Use buffer as input to image
+ sourceBuffer()
+ };
+ object_ = ::clCreateImage(
+ context(),
+ 0, // flags inherited from buffer
+ &format,
+ &desc,
+ nullptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != nullptr) {
+ *err = error;
+ }
+ }
+#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /*! \brief Constructs a 2D Image from an image.
+ * \note This will share storage with the underlying image but may
+ * reinterpret the channel order and type.
+ *
+     * The image will be created with a descriptor matching the source.
+ *
+ * \param order is the channel order to reinterpret the image data as.
+ * The channel order may differ as described in the OpenCL
+ * 2.0 API specification.
+ *
+ * Wraps clCreateImage().
+ */
+ Image2D(
+ const Context& context,
+ cl_channel_order order,
+ const Image &sourceImage,
+ cl_int* err = nullptr)
+ {
+ cl_int error;
+
+ // Descriptor fields have to match source image
+ size_type sourceWidth =
+ sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
+ size_type sourceHeight =
+ sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
+ size_type sourceRowPitch =
+ sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
+ cl_uint sourceNumMIPLevels =
+ sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
+ cl_uint sourceNumSamples =
+ sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
+ cl_image_format sourceFormat =
+ sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
+
+ // Update only the channel order.
+ // Channel format inherited from source.
+ sourceFormat.image_channel_order = order;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D,
+ sourceWidth,
+ sourceHeight,
+ 0, 0, // depth (unused), array size (unused)
+ sourceRowPitch,
+ 0, // slice pitch (unused)
+ sourceNumMIPLevels,
+ sourceNumSamples,
+ // Use buffer as input to image
+ sourceImage()
+ };
+ object_ = ::clCreateImage(
+ context(),
+ 0, // flags should be inherited from mem_object
+ &sourceFormat,
+ &desc,
+ nullptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != nullptr) {
+ *err = error;
+ }
+ }
+#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+ //! \brief Default constructor - initializes to NULL.
+ Image2D() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image2D(const cl_mem& image2D, bool retainObject = false) :
+ Image(image2D, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image2D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2D(const Image2D& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2D& operator = (const Image2D &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2D& operator = (Image2D &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+
+};
+
+
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+/*! \brief Class interface for GL 2D Image Memory objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ * \note Deprecated for OpenCL 1.2. Please use ImageGL instead.
+ */
+class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D
+{
+public:
+ /*! \brief Constructs an Image2DGL in a specified context, from a given
+ * GL Texture.
+ *
+ * Wraps clCreateFromGLTexture2D().
+ */
+ Image2DGL(
+ const Context& context,
+ cl_mem_flags flags,
+ cl_GLenum target,
+ cl_GLint miplevel,
+ cl_GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture2D(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image2DGL() : Image2D() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image2DGL(const cl_mem& image, bool retainObject = false) :
+ Image2D(image, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+     *
+ * See Memory for further details.
+ */
+ Image2DGL& operator = (const cl_mem& rhs)
+ {
+ Image2D::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DGL(const Image2DGL& img) : Image2D(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DGL& operator = (const Image2DGL &img)
+ {
+ Image2D::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DGL& operator = (Image2DGL &&img)
+ {
+ Image2D::operator=(std::move(img));
+ return *this;
+ }
+
+} CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+/*! \class Image2DArray
+ * \brief Image interface for arrays of 2D images.
+ */
+class Image2DArray : public Image
+{
+public:
+ Image2DArray(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type arraySize,
+ size_type width,
+ size_type height,
+ size_type rowPitch,
+ size_type slicePitch,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE2D_ARRAY,
+ width,
+ height,
+ 0, // depth (unused)
+ arraySize,
+ rowPitch,
+ slicePitch,
+ 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Image2DArray() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { }
+
+ Image2DArray& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DArray(const Image2DArray& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DArray& operator = (const Image2DArray &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image2DArray& operator = (Image2DArray &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+};
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+/*! \brief Class interface for 3D Image Memory objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image3D : public Image
+{
+public:
+ /*! \brief Constructs a 3D Image in a specified context.
+ *
+ * Wraps clCreateImage().
+ */
+ Image3D(
+ const Context& context,
+ cl_mem_flags flags,
+ ImageFormat format,
+ size_type width,
+ size_type height,
+ size_type depth,
+ size_type row_pitch = 0,
+ size_type slice_pitch = 0,
+ void* host_ptr = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ bool useCreateImage;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ // Run-time decision based on the actual platform
+ {
+ cl_uint version = detail::getContextPlatformVersion(context());
+ useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
+ }
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
+ useCreateImage = true;
+#else
+ useCreateImage = false;
+#endif
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ if (useCreateImage)
+ {
+ cl_image_desc desc =
+ {
+ CL_MEM_OBJECT_IMAGE3D,
+ width,
+ height,
+ depth,
+ 0, // array size (unused)
+ row_pitch,
+ slice_pitch,
+ 0, 0, 0
+ };
+ object_ = ::clCreateImage(
+ context(),
+ flags,
+ &format,
+ &desc,
+ host_ptr,
+ &error);
+
+ detail::errHandler(error, __CREATE_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ if (!useCreateImage)
+ {
+ object_ = ::clCreateImage3D(
+ context(), flags, &format, width, height, depth, row_pitch,
+ slice_pitch, host_ptr, &error);
+
+ detail::errHandler(error, __CREATE_IMAGE3D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image3D() : Image() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image3D(const cl_mem& image3D, bool retainObject = false) :
+ Image(image3D, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3D& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3D(const Image3D& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3D& operator = (const Image3D &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3D& operator = (Image3D &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+};
+
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+/*! \brief Class interface for GL 3D Image Memory objects.
+ *
+ * This is provided to facilitate interoperability with OpenGL.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Image3DGL : public Image3D
+{
+public:
+ /*! \brief Constructs an Image3DGL in a specified context, from a given
+ * GL Texture.
+ *
+ * Wraps clCreateFromGLTexture3D().
+ */
+ Image3DGL(
+ const Context& context,
+ cl_mem_flags flags,
+ cl_GLenum target,
+ cl_GLint miplevel,
+ cl_GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture3D(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ //! \brief Default constructor - initializes to NULL.
+ Image3DGL() : Image3D() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit Image3DGL(const cl_mem& image, bool retainObject = false) :
+ Image3D(image, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Image3DGL& operator = (const cl_mem& rhs)
+ {
+ Image3D::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3DGL(const Image3DGL& img) : Image3D(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3DGL& operator = (const Image3DGL &img)
+ {
+ Image3D::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Image3DGL& operator = (Image3DGL &&img)
+ {
+ Image3D::operator=(std::move(img));
+ return *this;
+ }
+};
+#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+/*! \class ImageGL
+ * \brief General image interface for GL interop.
+ * The 2D and 3D GL image types are abstracted into this single class, which
+ * wraps all GL-sourced images, on the grounds that the setup work was
+ * already performed on the OpenGL side.
+ */
+class ImageGL : public Image
+{
+public:
+ ImageGL(
+ const Context& context,
+ cl_mem_flags flags,
+ cl_GLenum target,
+ cl_GLint miplevel,
+ cl_GLuint texobj,
+ cl_int * err = NULL)
+ {
+ cl_int error;
+ object_ = ::clCreateFromGLTexture(
+ context(),
+ flags,
+ target,
+ miplevel,
+ texobj,
+ &error);
+
+ detail::errHandler(error, __CREATE_GL_TEXTURE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ ImageGL() : Image() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * See Memory for further details.
+ */
+ explicit ImageGL(const cl_mem& image, bool retainObject = false) :
+ Image(image, retainObject) { }
+
+ ImageGL& operator = (const cl_mem& rhs)
+ {
+ Image::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ ImageGL(const ImageGL& img) : Image(img) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ ImageGL& operator = (const ImageGL &img)
+ {
+ Image::operator=(img);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ ImageGL& operator = (ImageGL &&img)
+ {
+ Image::operator=(std::move(img));
+ return *this;
+ }
+};
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/*! \brief Class interface for Pipe Memory Objects.
+*
+* See Memory for details about copy semantics, etc.
+*
+* \see Memory
+*/
+class Pipe : public Memory
+{
+public:
+
+    /*! \brief Constructs a Pipe in a specified context.
+     *
+     * Wraps clCreatePipe(). The pipe is created with
+     * CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, the only flags valid for pipes.
+     * @param context Context in which to create the pipe.
+     * @param packet_size Size in bytes of a single packet of the pipe.
+     * @param max_packets Number of packets that may be stored in the pipe.
+     */
+ Pipe(
+ const Context& context,
+ cl_uint packet_size,
+ cl_uint max_packets,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
+ object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
+
+ detail::errHandler(error, __CREATE_PIPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+    /*! \brief Constructs a Pipe in the default context.
+     *
+     * Wraps clCreatePipe(). The pipe is created with
+     * CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, the only flags valid for pipes.
+     * @param packet_size Size in bytes of a single packet of the pipe.
+     * @param max_packets Number of packets that may be stored in the pipe.
+     */
+ Pipe(
+ cl_uint packet_size,
+ cl_uint max_packets,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(err);
+
+ cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
+ object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
+
+ detail::errHandler(error, __CREATE_PIPE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
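+
+    /* Usage sketch (illustrative only): a pipe of 1024 int-sized packets,
+     * typically connecting a producer kernel to a consumer kernel.
+     * \code
+     * cl::Pipe pipe(sizeof(cl_int), 1024);
+     * producerKernel.setArg(0, pipe);
+     * consumerKernel.setArg(0, pipe);
+     * \endcode
+     */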
+
+ //! \brief Default constructor - initializes to NULL.
+ Pipe() : Memory() { }
+
+ /*! \brief Constructor from cl_mem - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with earlier versions.
+ *
+ * See Memory for further details.
+ */
+ explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
+ Memory(pipe, retainObject) { }
+
+ /*! \brief Assignment from cl_mem - performs shallow copy.
+ *
+ * See Memory for further details.
+ */
+ Pipe& operator = (const cl_mem& rhs)
+ {
+ Memory::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Pipe(const Pipe& pipe) : Memory(pipe) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Pipe& operator = (const Pipe &pipe)
+ {
+ Memory::operator=(pipe);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Pipe& operator = (Pipe &&pipe)
+ {
+ Memory::operator=(std::move(pipe));
+ return *this;
+ }
+
+    //! \brief Wrapper for clGetPipeInfo().
+ template <typename T>
+ cl_int getInfo(cl_pipe_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetPipeInfo, object_, name, param),
+ __GET_PIPE_INFO_ERR);
+ }
+
+    //! \brief Wrapper for clGetPipeInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_pipe_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_pipe_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+}; // class Pipe
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+/*! \brief Class interface for cl_sampler.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_sampler as the original. For details, see
+ * clRetainSampler() and clReleaseSampler().
+ *
+ * \see cl_sampler
+ */
+class Sampler : public detail::Wrapper<cl_sampler>
+{
+public:
+ //! \brief Default constructor - initializes to NULL.
+ Sampler() { }
+
+ /*! \brief Constructs a Sampler in a specified context.
+ *
+ * Wraps clCreateSampler().
+ */
+ Sampler(
+ const Context& context,
+ cl_bool normalized_coords,
+ cl_addressing_mode addressing_mode,
+ cl_filter_mode filter_mode,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_sampler_properties sampler_properties[] = {
+ CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
+ CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
+ CL_SAMPLER_FILTER_MODE, filter_mode,
+ 0 };
+ object_ = ::clCreateSamplerWithProperties(
+ context(),
+ sampler_properties,
+ &error);
+
+ detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateSampler(
+ context(),
+ normalized_coords,
+ addressing_mode,
+ filter_mode,
+ &error);
+
+ detail::errHandler(error, __CREATE_SAMPLER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+ }
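+
+    /* Usage sketch (illustrative only): unnormalized coordinates, clamped
+     * addressing and nearest filtering.
+     * \code
+     * cl::Sampler sampler(
+     *     context, CL_FALSE, CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_NEAREST);
+     * \endcode
+     */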
+
+ /*! \brief Constructor from cl_sampler - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * This effectively transfers ownership of a refcount on the cl_sampler
+ * into the new Sampler object.
+ */
+ explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
+ detail::Wrapper<cl_type>(sampler, retainObject) { }
+
+ /*! \brief Assignment operator from cl_sampler - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseSampler() on the value previously held by this instance.
+ */
+ Sampler& operator = (const cl_sampler& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Sampler& operator = (const Sampler &sam)
+ {
+ detail::Wrapper<cl_type>::operator=(sam);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Sampler& operator = (Sampler &&sam)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(sam));
+ return *this;
+ }
+
+ //! \brief Wrapper for clGetSamplerInfo().
+ template <typename T>
+ cl_int getInfo(cl_sampler_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetSamplerInfo, object_, name, param),
+ __GET_SAMPLER_INFO_ERR);
+ }
+
+ //! \brief Wrapper for clGetSamplerInfo() that returns by value.
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_sampler_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_sampler_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+};
+
+class Program;
+class CommandQueue;
+class DeviceCommandQueue;
+class Kernel;
+
+//! \brief Class interface for specifying NDRange values.
+class NDRange
+{
+private:
+ size_type sizes_[3];
+ cl_uint dimensions_;
+
+public:
+ //! \brief Default constructor - resulting range has zero dimensions.
+ NDRange()
+ : dimensions_(0)
+ {
+ sizes_[0] = 0;
+ sizes_[1] = 0;
+ sizes_[2] = 0;
+ }
+
+ //! \brief Constructs one-dimensional range.
+ NDRange(size_type size0)
+ : dimensions_(1)
+ {
+ sizes_[0] = size0;
+ sizes_[1] = 1;
+ sizes_[2] = 1;
+ }
+
+ //! \brief Constructs two-dimensional range.
+ NDRange(size_type size0, size_type size1)
+ : dimensions_(2)
+ {
+ sizes_[0] = size0;
+ sizes_[1] = size1;
+ sizes_[2] = 1;
+ }
+
+ //! \brief Constructs three-dimensional range.
+ NDRange(size_type size0, size_type size1, size_type size2)
+ : dimensions_(3)
+ {
+ sizes_[0] = size0;
+ sizes_[1] = size1;
+ sizes_[2] = size2;
+ }
+
+ /*! \brief Conversion operator to const size_type *.
+ *
+ * \returns a pointer to the size of the first dimension.
+ */
+ operator const size_type*() const {
+ return sizes_;
+ }
+
+ //! \brief Queries the number of dimensions in the range.
+ size_type dimensions() const
+ {
+ return dimensions_;
+ }
+
+ //! \brief Returns the size of the object in bytes, based on the
+ //! runtime number of dimensions.
+ size_type size() const
+ {
+ return dimensions_*sizeof(size_type);
+ }
+
+ size_type* get()
+ {
+ return sizes_;
+ }
+
+ const size_type* get() const
+ {
+ return sizes_;
+ }
+};
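+
+/* Usage sketch (illustrative only):
+ *   cl::NDRange global(1024, 1024); // two-dimensional global work size
+ *   cl::NDRange local(16, 16);      // two-dimensional work-group size
+ *   cl::NDRange zero;               // zero dimensions; equivalent to NullRange below
+ */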
+
+//! \brief A zero-dimensional range.
+static const NDRange NullRange;
+
+//! \brief Local address wrapper for use with Kernel::setArg
+struct LocalSpaceArg
+{
+ size_type size_;
+};
+
+namespace detail {
+
+template <typename T, class Enable = void>
+struct KernelArgumentHandler;
+
+// Enable for objects that are not subclasses of memory
+// Pointers, constants etc
+template <typename T>
+struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
+{
+ static size_type size(const T&) { return sizeof(T); }
+ static const T* ptr(const T& value) { return &value; }
+};
+
+// Enable for subclasses of memory where we want to get a reference to the cl_mem out
+// and pass that in for safety
+template <typename T>
+struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
+{
+ static size_type size(const T&) { return sizeof(cl_mem); }
+ static const cl_mem* ptr(const T& value) { return &(value()); }
+};
+
+// Specialization for DeviceCommandQueue defined later
+
+template <>
+struct KernelArgumentHandler<LocalSpaceArg, void>
+{
+ static size_type size(const LocalSpaceArg& value) { return value.size_; }
+ static const void* ptr(const LocalSpaceArg&) { return NULL; }
+};
+
+}
+//! \endcond
+
+/*! \brief Helper function for generating LocalSpaceArg objects.
+ */
+inline LocalSpaceArg
+Local(size_type size)
+{
+ LocalSpaceArg ret = { size };
+ return ret;
+}
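+
+/* Usage sketch (assumes a valid cl::Kernel named kernel): reserve
+ * 64 floats of device local memory for kernel argument 2:
+ *   kernel.setArg(2, cl::Local(64 * sizeof(cl_float)));
+ */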
+
+/*! \brief Class interface for cl_kernel.
+ *
+ * \note Copies of these objects are shallow, meaning that the copy will refer
+ * to the same underlying cl_kernel as the original. For details, see
+ * clRetainKernel() and clReleaseKernel().
+ *
+ * \see cl_kernel
+ */
+class Kernel : public detail::Wrapper<cl_kernel>
+{
+public:
+ inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
+
+ //! \brief Default constructor - initializes to NULL.
+ Kernel() { }
+
+ /*! \brief Constructor from cl_kernel - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ * This effectively transfers ownership of a refcount on the cl_kernel
+ * into the new Kernel object.
+ */
+ explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
+ detail::Wrapper<cl_type>(kernel, retainObject) { }
+
+ /*! \brief Assignment operator from cl_kernel - takes ownership.
+ *
+ * This effectively transfers ownership of a refcount on the rhs and calls
+ * clReleaseKernel() on the value previously held by this instance.
+ */
+ Kernel& operator = (const cl_kernel& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Kernel& operator = (const Kernel &kernel)
+ {
+ detail::Wrapper<cl_type>::operator=(kernel);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Kernel& operator = (Kernel &&kernel)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(kernel));
+ return *this;
+ }
+
+ template <typename T>
+ cl_int getInfo(cl_kernel_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetKernelInfo, object_, name, param),
+ __GET_KERNEL_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ template <typename T>
+ cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
+ __GET_KERNEL_ARG_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
+ getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_arg_info, name>::param_type param;
+ cl_int result = getArgInfo(argIndex, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ template <typename T>
+ cl_int getWorkGroupInfo(
+ const Device& device, cl_kernel_work_group_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetKernelWorkGroupInfo, object_, device(), name, param),
+ __GET_KERNEL_WORK_GROUP_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
+ getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_kernel_work_group_info, name>::param_type param;
+ cl_int result = getWorkGroupInfo(device, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+#if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+ cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
+ {
+ typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
+ static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
+ CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
+
+ return detail::errHandler(
+ pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
+ __GET_KERNEL_ARG_INFO_ERR);
+ }
+
+ template <cl_int name>
+ size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
+ {
+ size_type param;
+ cl_int result = getSubGroupInfo(dev, name, range, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+#endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /*! \brief setArg overload taking a shared_ptr type
+ */
+ template<typename T, class D>
+ cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
+ {
+ return detail::errHandler(
+ ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
+ __SET_KERNEL_ARGS_ERR);
+ }
+
+ /*! \brief setArg overload taking a vector type.
+ */
+ template<typename T, class Alloc>
+ cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
+ {
+ return detail::errHandler(
+ ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
+ __SET_KERNEL_ARGS_ERR);
+ }
+
+ /*! \brief setArg overload taking a pointer type
+ */
+ template<typename T>
+ typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
+ setArg(cl_uint index, const T argPtr)
+ {
+ return detail::errHandler(
+ ::clSetKernelArgSVMPointer(object_, index, argPtr),
+ __SET_KERNEL_ARGS_ERR);
+ }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+ /*! \brief setArg overload taking a POD type
+ */
+ template <typename T>
+ typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
+ setArg(cl_uint index, const T &value)
+ {
+ return detail::errHandler(
+ ::clSetKernelArg(
+ object_,
+ index,
+ detail::KernelArgumentHandler<T>::size(value),
+ detail::KernelArgumentHandler<T>::ptr(value)),
+ __SET_KERNEL_ARGS_ERR);
+ }
+
+ cl_int setArg(cl_uint index, size_type size, const void* argPtr)
+ {
+ return detail::errHandler(
+ ::clSetKernelArg(object_, index, size, argPtr),
+ __SET_KERNEL_ARGS_ERR);
+ }
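+
+ /* Argument-setting sketch (buf, n and hostData are illustrative names for a
+  * valid cl::Buffer, a cl_int and a host-side cl_float4 respectively):
+  *   kernel.setArg(0, buf);                          // memory object, passed as cl_mem
+  *   kernel.setArg(1, n);                            // POD value, copied by size
+  *   kernel.setArg(2, sizeof(cl_float4), &hostData); // raw size/pointer form
+  */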
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /*!
+ * Specify a vector of SVM pointers that the kernel may access in
+ * addition to its arguments.
+ */
+ cl_int setSVMPointers(const vector<void*> &pointerList)
+ {
+ return detail::errHandler(
+ ::clSetKernelExecInfo(
+ object_,
+ CL_KERNEL_EXEC_INFO_SVM_PTRS,
+ sizeof(void*)*pointerList.size(),
+ pointerList.data()));
+ }
+
+ /*!
+ * Specify a std::array of SVM pointers that the kernel may access in
+ * addition to its arguments.
+ */
+ template<int ArrayLength>
+ cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
+ {
+ return detail::errHandler(
+ ::clSetKernelExecInfo(
+ object_,
+ CL_KERNEL_EXEC_INFO_SVM_PTRS,
+ sizeof(void*)*pointerList.size(),
+ pointerList.data()));
+ }
+
+ /*! \brief Enable fine-grained system SVM.
+ *
+ * \note It is only possible to enable fine-grained system SVM if all devices
+ * in the context associated with the kernel support it.
+ *
+ * \param svmEnabled True if fine-grained system SVM is requested. False otherwise.
+ * \return CL_SUCCESS if the function was executed successfully, or CL_INVALID_OPERATION
+ * if no devices in the context support fine-grained system SVM.
+ *
+ * \see clSetKernelExecInfo
+ */
+ cl_int enableFineGrainedSystemSVM(bool svmEnabled)
+ {
+ cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
+ return detail::errHandler(
+ ::clSetKernelExecInfo(
+ object_,
+ CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
+ sizeof(cl_bool),
+ &svmEnabled_
+ )
+ );
+ }
+
+ template<int index, int ArrayLength, class D, typename T0, typename... Ts>
+ void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, Ts... ts)
+ {
+ pointerList[index] = static_cast<void*>(t0.get());
+ setSVMPointersHelper<index + 1, ArrayLength>(pointerList, ts...);
+ }
+
+ template<int index, int ArrayLength, typename T0, typename... Ts>
+ typename std::enable_if<std::is_pointer<T0>::value, void>::type
+ setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, Ts... ts)
+ {
+ pointerList[index] = static_cast<void*>(t0);
+ setSVMPointersHelper<index + 1, ArrayLength>(pointerList, ts...);
+ }
+
+ template<int index, int ArrayLength, typename T0, class D>
+ void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
+ {
+ pointerList[index] = static_cast<void*>(t0.get());
+ }
+
+ template<int index, int ArrayLength, typename T0>
+ typename std::enable_if<std::is_pointer<T0>::value, void>::type
+ setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
+ {
+ pointerList[index] = static_cast<void*>(t0);
+ }
+
+ template<typename T0, typename... Ts>
+ cl_int setSVMPointers(const T0 &t0, Ts... ts)
+ {
+ std::array<void*, 1 + sizeof...(Ts)> pointerList;
+
+ setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
+ return detail::errHandler(
+ ::clSetKernelExecInfo(
+ object_,
+ CL_KERNEL_EXEC_INFO_SVM_PTRS,
+ sizeof(void*)*(1 + sizeof...(Ts)),
+ pointerList.data()));
+ }
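+
+ /* SVM sketch (assumes a valid cl::Context ctx and cl::Kernel kernel;
+  * names are illustrative):
+  *   int *a = static_cast<int*>(::clSVMAlloc(ctx(), CL_MEM_READ_WRITE, bytes, 0));
+  *   int *b = static_cast<int*>(::clSVMAlloc(ctx(), CL_MEM_READ_WRITE, bytes, 0));
+  *   kernel.setArg(0, a);      // routed to clSetKernelArgSVMPointer
+  *   kernel.setSVMPointers(b); // b is reached indirectly, not via an argument
+  */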
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+};
+
+/*! \class Program
+ * \brief Program interface that implements cl_program.
+ */
+class Program : public detail::Wrapper<cl_program>
+{
+public:
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ typedef vector<vector<unsigned char>> Binaries;
+ typedef vector<string> Sources;
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ typedef vector<std::pair<const void*, size_type> > Binaries;
+ typedef vector<std::pair<const char*, size_type> > Sources;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+
+ Program(
+ const string& source,
+ bool build = false,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const char * strings = source.c_str();
+ const size_type length = source.size();
+
+ Context context = Context::getDefault(err);
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)1, &strings, &length, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+ if (error == CL_SUCCESS && build) {
+
+ error = ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+ "-cl-std=CL2.0",
+#else
+ "",
+#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+ NULL,
+ NULL);
+
+ detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+ }
+
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ Program(
+ const Context& context,
+ const string& source,
+ bool build = false,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const char * strings = source.c_str();
+ const size_type length = source.size();
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)1, &strings, &length, &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+ if (error == CL_SUCCESS && build) {
+ error = ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+ "-cl-std=CL2.0",
+#else
+ "",
+#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+ NULL,
+ NULL);
+
+ detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+ }
+
+ if (err != NULL) {
+ *err = error;
+ }
+ }
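+
+ /* Typical source-build sketch (assumes a valid cl::Context ctx and a
+  * string src holding OpenCL C source):
+  *   cl_int err;
+  *   cl::Program program(ctx, src, true, &err); // create and build in one call
+  */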
+
+ /**
+ * Create a program from a vector of source strings and the default context.
+ * Does not compile or link the program.
+ */
+ Program(
+ const Sources& sources,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ Context context = Context::getDefault(err);
+
+ const size_type n = (size_type)sources.size();
+
+ vector<size_type> lengths(n);
+ vector<const char*> strings(n);
+
+ for (size_type i = 0; i < n; ++i) {
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ strings[i] = sources[(int)i].data();
+ lengths[i] = sources[(int)i].length();
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ strings[i] = sources[(int)i].first;
+ lengths[i] = sources[(int)i].second;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ }
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)n, strings.data(), lengths.data(), &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /**
+ * Create a program from a vector of source strings and a provided context.
+ * Does not compile or link the program.
+ */
+ Program(
+ const Context& context,
+ const Sources& sources,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const size_type n = (size_type)sources.size();
+
+ vector<size_type> lengths(n);
+ vector<const char*> strings(n);
+
+ for (size_type i = 0; i < n; ++i) {
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ strings[i] = sources[(int)i].data();
+ lengths[i] = sources[(int)i].length();
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ strings[i] = sources[(int)i].first;
+ lengths[i] = sources[(int)i].second;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ }
+
+ object_ = ::clCreateProgramWithSource(
+ context(), (cl_uint)n, strings.data(), lengths.data(), &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /**
+ * Construct a program object from a list of devices and a per-device list of binaries.
+ * \param context A valid OpenCL context in which to construct the program.
+ * \param devices A vector of OpenCL device objects for which the program will be created.
+ * \param binaries A vector of pairs of a pointer to a binary object and its length.
+ * \param binaryStatus An optional vector that on completion will be resized to
+ * match the size of binaries and filled with values to specify if each binary
+ * was successfully loaded.
+ * Set to CL_SUCCESS if the binary was successfully loaded.
+ * Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
+ * Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
+ * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
+ * CL_INVALID_CONTEXT if context is not a valid context.
+ * CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
+ * or if any entry in binaries is NULL or has length 0.
+ * CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
+ * CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
+ * CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
+ */
+ Program(
+ const Context& context,
+ const vector<Device>& devices,
+ const Binaries& binaries,
+ vector<cl_int>* binaryStatus = NULL,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ const size_type numDevices = devices.size();
+
+ // Catch size mismatch early and return
+ if(binaries.size() != numDevices) {
+ error = CL_INVALID_VALUE;
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+
+ vector<size_type> lengths(numDevices);
+ vector<const unsigned char*> images(numDevices);
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ for (size_type i = 0; i < numDevices; ++i) {
+ images[i] = binaries[i].data();
+ lengths[i] = binaries[i].size();
+ }
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+ for (size_type i = 0; i < numDevices; ++i) {
+ images[i] = (const unsigned char*)binaries[i].first;
+ lengths[i] = binaries[i].second;
+ }
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+
+ vector<cl_device_id> deviceIDs(numDevices);
+ for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ if(binaryStatus) {
+ binaryStatus->resize(numDevices);
+ }
+
+ object_ = ::clCreateProgramWithBinary(
+ context(), (cl_uint) devices.size(),
+ deviceIDs.data(),
+ lengths.data(), images.data(),
+ (binaryStatus != NULL && numDevices > 0) ? &binaryStatus->front() : NULL,
+ &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
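+
+ /* Binary-caching sketch (assumes the default Binaries typedef and a
+  * program already built for devices):
+  *   cl::Program::Binaries bins = program.getInfo<CL_PROGRAM_BINARIES>();
+  *   // ... persist bins, then later reconstruct without recompiling:
+  *   cl::Program restored(ctx, devices, bins);
+  */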
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ /**
+ * Create program using builtin kernels.
+ * \param kernelNames Semicolon-separated list of built-in kernel names.
+ */
+ Program(
+ const Context& context,
+ const vector<Device>& devices,
+ const string& kernelNames,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+
+ size_type numDevices = devices.size();
+ vector<cl_device_id> deviceIDs(numDevices);
+ for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ object_ = ::clCreateProgramWithBuiltInKernels(
+ context(),
+ (cl_uint) devices.size(),
+ deviceIDs.data(),
+ kernelNames.c_str(),
+ &error);
+
+ detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ Program() { }
+
+
+ /*! \brief Constructor from cl_program - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ */
+ explicit Program(const cl_program& program, bool retainObject = false) :
+ detail::Wrapper<cl_type>(program, retainObject) { }
+
+ Program& operator = (const cl_program& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ Program& operator = (const Program &program)
+ {
+ detail::Wrapper<cl_type>::operator=(program);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ Program& operator = (Program &&program)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(program));
+ return *this;
+ }
+
+ cl_int build(
+ const vector<Device>& devices,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ size_type numDevices = devices.size();
+ vector<cl_device_id> deviceIDs(numDevices);
+
+ for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+ deviceIDs[deviceIndex] = (devices[deviceIndex])();
+ }
+
+ cl_int buildError = ::clBuildProgram(
+ object_,
+ (cl_uint) devices.size(),
+ deviceIDs.data(),
+ options,
+ notifyFptr,
+ data);
+
+ return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+ }
+
+ cl_int build(
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ cl_int buildError = ::clBuildProgram(
+ object_,
+ 0,
+ NULL,
+ options,
+ notifyFptr,
+ data);
+
+
+ return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ cl_int compile(
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL) const
+ {
+ cl_int error = ::clCompileProgram(
+ object_,
+ 0,
+ NULL,
+ options,
+ 0,
+ NULL,
+ NULL,
+ notifyFptr,
+ data);
+ return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ template <typename T>
+ cl_int getInfo(cl_program_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(&::clGetProgramInfo, object_, name, param),
+ __GET_PROGRAM_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_program_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_program_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ template <typename T>
+ cl_int getBuildInfo(
+ const Device& device, cl_program_build_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetProgramBuildInfo, object_, device(), name, param),
+ __GET_PROGRAM_BUILD_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_program_build_info, name>::param_type
+ getBuildInfo(const Device& device, cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_program_build_info, name>::param_type param;
+ cl_int result = getBuildInfo(device, name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /**
+ * Build info function that returns a vector of device/info pairs for the specified
+ * info type and for all devices in the program.
+ * On an error reading the info for any device, an empty vector of info will be returned.
+ */
+ template <cl_int name>
+ vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
+ getBuildInfo(cl_int *err = NULL) const
+ {
+ cl_int result = CL_SUCCESS;
+
+ auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
+ vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
+ devInfo;
+
+ // If there was an initial error from getInfo return the error
+ if (result != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = result;
+ }
+ return devInfo;
+ }
+
+ for (cl::Device d : devs) {
+ typename detail::param_traits<
+ detail::cl_program_build_info, name>::param_type param;
+ result = getBuildInfo(d, name, &param);
+ devInfo.push_back(
+ std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
+ (d, param));
+ if (result != CL_SUCCESS) {
+ // On error, leave the loop and return the error code
+ break;
+ }
+ }
+ if (err != NULL) {
+ *err = result;
+ }
+ if (result != CL_SUCCESS) {
+ devInfo.clear();
+ }
+ return devInfo;
+ }
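+
+ /* Build-log sketch: after a failed build, collect the log for every
+  * device in the program (program is an illustrative cl::Program):
+  *   for (auto &devLog : program.getBuildInfo<CL_PROGRAM_BUILD_LOG>()) {
+  *       std::cerr << devLog.second << std::endl; // devLog.first is the cl::Device
+  *   }
+  */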
+
+ cl_int createKernels(vector<Kernel>* kernels)
+ {
+ cl_uint numKernels;
+ cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+ }
+
+ vector<cl_kernel> value(numKernels);
+
+ err = ::clCreateKernelsInProgram(
+ object_, numKernels, value.data(), NULL);
+ if (err != CL_SUCCESS) {
+ return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+ }
+
+ if (kernels) {
+ kernels->resize(value.size());
+
+ // Assign to the output vector. clCreateKernelsInProgram returns
+ // new references, so ownership transfers directly to each Kernel
+ // wrapper and no additional retain is required.
+ for (size_type i = 0; i < value.size(); i++) {
+ (*kernels)[i] = Kernel(value[i], false);
+ }
+ }
+ return CL_SUCCESS;
+ }
+};
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+inline Program linkProgram(
+ Program input1,
+ Program input2,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+{
+ cl_int error_local = CL_SUCCESS;
+
+ cl_program programs[2] = { input1(), input2() };
+
+ Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
+ if(error_local!=CL_SUCCESS) {
+ detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+ }
+
+ cl_program prog = ::clLinkProgram(
+ ctx(),
+ 0,
+ NULL,
+ options,
+ 2,
+ programs,
+ notifyFptr,
+ data,
+ &error_local);
+
+ detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+ if (err != NULL) {
+ *err = error_local;
+ }
+
+ return Program(prog);
+}
+
+inline Program linkProgram(
+ vector<Program> inputPrograms,
+ const char* options = NULL,
+ void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+ void* data = NULL,
+ cl_int* err = NULL)
+{
+ cl_int error_local = CL_SUCCESS;
+
+ vector<cl_program> programs(inputPrograms.size());
+
+ for (unsigned int i = 0; i < inputPrograms.size(); i++) {
+ programs[i] = inputPrograms[i]();
+ }
+
+ Context ctx;
+ if(inputPrograms.size() > 0) {
+ ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
+ if(error_local!=CL_SUCCESS) {
+ detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+ }
+ }
+ cl_program prog = ::clLinkProgram(
+ ctx(),
+ 0,
+ NULL,
+ options,
+ (cl_uint)inputPrograms.size(),
+ programs.data(),
+ notifyFptr,
+ data,
+ &error_local);
+
+ detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+ if (err != NULL) {
+ *err = error_local;
+ }
+
+ return Program(prog, false);
+}
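+
+/* Separate compile/link sketch (OpenCL 1.2+; assumes progA and progB were
+ * created from source in the same context):
+ *   progA.compile();
+ *   progB.compile();
+ *   cl_int err;
+ *   cl::Program linked = cl::linkProgram(progA, progB, NULL, NULL, NULL, &err);
+ */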
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+// Template specialization for CL_PROGRAM_BINARIES
+template <>
+inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
+{
+ if (name != CL_PROGRAM_BINARIES) {
+ return CL_INVALID_VALUE;
+ }
+ if (param) {
+ // Resize the parameter array appropriately for each allocation
+ // and pass down to the helper
+
+ vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
+ size_type numBinaries = sizes.size();
+
+ // Resize the parameter array and constituent arrays
+ param->resize(numBinaries);
+ for (size_type i = 0; i < numBinaries; ++i) {
+ (*param)[i].resize(sizes[i]);
+ }
+
+ return detail::errHandler(
+ detail::getInfo(&::clGetProgramInfo, object_, name, param),
+ __GET_PROGRAM_INFO_ERR);
+ }
+
+ return CL_SUCCESS;
+}
+
+template<>
+inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
+{
+ vector<vector<unsigned char>> binariesVectors;
+
+ cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
+ if (err != NULL) {
+ *err = result;
+ }
+ return binariesVectors;
+}
+
+inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
+{
+ cl_int error;
+
+ object_ = ::clCreateKernel(program(), name, &error);
+ detail::errHandler(error, __CREATE_KERNEL_ERR);
+
+ if (err != NULL) {
+ *err = error;
+ }
+
+}
+
+enum class QueueProperties : cl_command_queue_properties
+{
+ None = 0,
+ Profiling = CL_QUEUE_PROFILING_ENABLE,
+ OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
+};
+
+inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
+{
+ return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
+}
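+
+/* The enum class composes while remaining type-safe (a sketch, assuming a
+ * valid context ctx and device dev):
+ *   cl::CommandQueue queue(ctx, dev,
+ *       cl::QueueProperties::Profiling | cl::QueueProperties::OutOfOrder);
+ */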
+
+/*! \class CommandQueue
+ * \brief CommandQueue interface for cl_command_queue.
+ */
+class CommandQueue : public detail::Wrapper<cl_command_queue>
+{
+private:
+ static std::once_flag default_initialized_;
+ static CommandQueue default_;
+ static cl_int default_error_;
+
+ /*! \brief Create the default command queue returned by @ref getDefault.
+ *
+ * It sets default_error_ to indicate success or failure. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefault()
+ {
+ /* We don't want to throw an error from this function, so we have to
+ * catch and set the error flag.
+ */
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ try
+#endif
+ {
+ cl_int error;
+ Context context = Context::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ default_error_ = error;
+ }
+ else {
+ Device device = Device::getDefault();
+ default_ = CommandQueue(context, device, 0, &default_error_);
+ }
+ }
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+ catch (cl::Error &e) {
+ default_error_ = e.err();
+ }
+#endif
+ }
+
+ /*! \brief Create the default command queue.
+ *
+ * This sets @c default_. It does not throw
+ * @c cl::Error.
+ */
+ static void makeDefaultProvided(const CommandQueue &c) {
+ default_ = c;
+ }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+ /*! \brief Reset the default.
+ *
+ * This sets @c default_ to an empty value to support cleanup in
+ * the unit test framework.
+ * This function is not thread safe.
+ */
+ static void unitTestClearDefault() {
+ default_ = CommandQueue();
+ }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+
+ /*!
+ * \brief Constructs a CommandQueue based on passed properties.
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ CommandQueue(
+ cl_command_queue_properties properties,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+
+ if (error != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ else {
+ Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties, 0 };
+ if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+ }
+ else {
+ error = CL_INVALID_QUEUE_PROPERTIES;
+ }
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), device(), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+ }
+ }
+
+ /*!
+ * \brief Constructs a CommandQueue based on passed properties.
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ CommandQueue(
+ QueueProperties properties,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ Context context = Context::getDefault(&error);
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+
+ if (error != CL_SUCCESS) {
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+ else {
+ Device device = context.getInfo<CL_CONTEXT_DEVICES>()[0];
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
+
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+ }
+ }
+
+ /*!
+ * \brief Constructs a CommandQueue for an implementation-defined device in the given context.
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ explicit CommandQueue(
+ const Context& context,
+ cl_command_queue_properties properties = 0,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ vector<cl::Device> devices;
+ error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+
+ if (error != CL_SUCCESS)
+ {
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties, 0 };
+ if ((properties & CL_QUEUE_ON_DEVICE) == 0) {
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), devices[0](), queue_properties, &error);
+ }
+ else {
+ error = CL_INVALID_QUEUE_PROPERTIES;
+ }
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), devices[0](), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+
+ }
+
+ /*!
+ * \brief Constructs a CommandQueue for an implementation-defined device in the given context.
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ explicit CommandQueue(
+ const Context& context,
+ QueueProperties properties,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+ vector<cl::Device> devices;
+ error = context.getInfo(CL_CONTEXT_DEVICES, &devices);
+
+ detail::errHandler(error, __CREATE_CONTEXT_ERR);
+
+ if (error != CL_SUCCESS)
+ {
+ if (err != NULL) {
+ *err = error;
+ }
+ return;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), devices[0](), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), devices[0](), static_cast<cl_command_queue_properties>(properties), &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+
+ }
+
+ /*!
+ * \brief Constructs a CommandQueue for a passed device and context
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ CommandQueue(
+ const Context& context,
+ const Device& device,
+ cl_command_queue_properties properties = 0,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties, 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), device(), properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+ }
+
+ /*!
+ * \brief Constructs a CommandQueue for a passed device and context
+ * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
+ */
+ CommandQueue(
+ const Context& context,
+ const Device& device,
+ QueueProperties properties,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, static_cast<cl_queue_properties>(properties), 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#else
+ object_ = ::clCreateCommandQueue(
+ context(), device(), static_cast<cl_command_queue_properties>(properties), &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+#endif
+ }
+
+ static CommandQueue getDefault(cl_int * err = NULL)
+ {
+ std::call_once(default_initialized_, makeDefault);
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+#else // CL_HPP_TARGET_OPENCL_VERSION >= 200
+ detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR);
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+ if (err != NULL) {
+ *err = default_error_;
+ }
+ return default_;
+ }
+
+ /**
+ * Modify the default command queue to be used by
+ * subsequent operations.
+ * Will only set the default if no default was previously created.
+ * @return The updated default command queue.
+ * Compare it to the passed value to verify that the update took effect.
+ */
+ static CommandQueue setDefault(const CommandQueue &default_queue)
+ {
+ std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue));
+ detail::errHandler(default_error_);
+ return default_;
+ }
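+
+ /* Default-queue sketch: the first call lazily creates a queue on the
+  * default context and device; subsequent calls return the same object:
+  *   cl_int err;
+  *   cl::CommandQueue queue = cl::CommandQueue::getDefault(&err);
+  */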
+
+ CommandQueue() { }
+
+
+ /*! \brief Constructor from cl_command_queue - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ */
+ explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
+ detail::Wrapper<cl_type>(commandQueue, retainObject) { }
+
+ CommandQueue& operator = (const cl_command_queue& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ CommandQueue(const CommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ CommandQueue& operator = (const CommandQueue &queue)
+ {
+ detail::Wrapper<cl_type>::operator=(queue);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ CommandQueue& operator = (CommandQueue &&queue)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(queue));
+ return *this;
+ }
+
+ template <typename T>
+ cl_int getInfo(cl_command_queue_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetCommandQueueInfo, object_, name, param),
+ __GET_COMMAND_QUEUE_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_command_queue_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_command_queue_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ cl_int enqueueReadBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ size_type offset,
+ size_type size,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadBuffer(
+ object_, buffer(), blocking, offset, size,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueWriteBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ size_type offset,
+ size_type size,
+ const void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteBuffer(
+ object_, buffer(), blocking, offset, size,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
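+
+ /* Blocking round-trip sketch (illustrative: queue, a cl::Buffer buf of n
+  * floats, and host vectors in/out of size n):
+  *   queue.enqueueWriteBuffer(buf, CL_TRUE, 0, n * sizeof(cl_float), in.data());
+  *   // ... enqueue kernels that operate on buf ...
+  *   queue.enqueueReadBuffer(buf, CL_TRUE, 0, n * sizeof(cl_float), out.data());
+  */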
+
+ cl_int enqueueCopyBuffer(
+ const Buffer& src,
+ const Buffer& dst,
+ size_type src_offset,
+ size_type dst_offset,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBuffer(
+ object_, src(), dst(), src_offset, dst_offset, size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQEUE_COPY_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReadBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const array<size_type, 3>& buffer_offset,
+ const array<size_type, 3>& host_offset,
+ const array<size_type, 3>& region,
+ size_type buffer_row_pitch,
+ size_type buffer_slice_pitch,
+ size_type host_row_pitch,
+ size_type host_slice_pitch,
+ void *ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadBufferRect(
+ object_,
+ buffer(),
+ blocking,
+ buffer_offset.data(),
+ host_offset.data(),
+ region.data(),
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueWriteBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const array<size_type, 3>& buffer_offset,
+ const array<size_type, 3>& host_offset,
+ const array<size_type, 3>& region,
+ size_type buffer_row_pitch,
+ size_type buffer_slice_pitch,
+ size_type host_row_pitch,
+ size_type host_slice_pitch,
+ void *ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteBufferRect(
+ object_,
+ buffer(),
+ blocking,
+ buffer_offset.data(),
+ host_offset.data(),
+ region.data(),
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyBufferRect(
+ const Buffer& src,
+ const Buffer& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ size_type src_row_pitch,
+ size_type src_slice_pitch,
+ size_type dst_row_pitch,
+ size_type dst_slice_pitch,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBufferRect(
+ object_,
+ src(),
+ dst(),
+ src_origin.data(),
+ dst_origin.data(),
+ region.data(),
+ src_row_pitch,
+ src_slice_pitch,
+ dst_row_pitch,
+ dst_slice_pitch,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQEUE_COPY_BUFFER_RECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ /**
+ * Enqueue a command to fill a buffer object with a pattern
+ * of a given size. The pattern is specified as a vector type.
+ * \tparam PatternType The datatype of the pattern field.
+ * The pattern type must be an accepted OpenCL data type.
+ * \param offset The offset in bytes into the buffer at
+ * which to start filling. This must be a multiple of
+ * the pattern size.
+ * \param size The size in bytes of the region to fill.
+ * This must be a multiple of the pattern size.
+ */
+ template<typename PatternType>
+ cl_int enqueueFillBuffer(
+ const Buffer& buffer,
+ PatternType pattern,
+ size_type offset,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillBuffer(
+ object_,
+ buffer(),
+ static_cast<void*>(&pattern),
+ sizeof(PatternType),
+ offset,
+ size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
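+
+ /* Fill sketch: zero a buffer of n floats; the pattern is a single cl_float,
+  * so both offset and size are multiples of the pattern size:
+  *   queue.enqueueFillBuffer(buf, 0.0f, 0, n * sizeof(cl_float));
+  */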
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ cl_int enqueueReadImage(
+ const Image& image,
+ cl_bool blocking,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ size_type row_pitch,
+ size_type slice_pitch,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReadImage(
+ object_,
+ image(),
+ blocking,
+ origin.data(),
+ region.data(),
+ row_pitch,
+ slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_READ_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueWriteImage(
+ const Image& image,
+ cl_bool blocking,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ size_type row_pitch,
+ size_type slice_pitch,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueWriteImage(
+ object_,
+ image(),
+ blocking,
+ origin.data(),
+ region.data(),
+ row_pitch,
+ slice_pitch,
+ ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_WRITE_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyImage(
+ const Image& src,
+ const Image& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyImage(
+ object_,
+ src(),
+ dst(),
+ src_origin.data(),
+ dst_origin.data(),
+ region.data(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA floating-point color value if
+ * the image channel data type is not an unnormalized signed or
+ * unsigned data type.
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_float4 fillColor,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ origin.data(),
+ region.data(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA signed integer color value if
+ * the image channel data type is an unnormalized signed integer
+ * type.
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_int4 fillColor,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ origin.data(),
+ region.data(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueue a command to fill an image object with a specified color.
+ * \param fillColor is the color to use to fill the image.
+ * This is a four-component RGBA unsigned integer color value if
+ * the image channel data type is an unnormalized unsigned integer
+ * type.
+ */
+ cl_int enqueueFillImage(
+ const Image& image,
+ cl_uint4 fillColor,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueFillImage(
+ object_,
+ image(),
+ static_cast<void*>(&fillColor),
+ origin.data(),
+ region.data(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_FILL_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ cl_int enqueueCopyImageToBuffer(
+ const Image& src,
+ const Buffer& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& region,
+ size_type dst_offset,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyImageToBuffer(
+ object_,
+ src(),
+ dst(),
+ src_origin.data(),
+ region.data(),
+ dst_offset,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueCopyBufferToImage(
+ const Buffer& src,
+ const Image& dst,
+ size_type src_offset,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueCopyBufferToImage(
+ object_,
+ src(),
+ dst(),
+ src_offset,
+ dst_origin.data(),
+ region.data(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ void* enqueueMapBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type offset,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL) const
+ {
+ cl_event tmp;
+ cl_int error;
+ void * result = ::clEnqueueMapBuffer(
+ object_, buffer(), blocking, flags, offset, size,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL,
+ &error);
+
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ if (event != NULL && error == CL_SUCCESS)
+ *event = tmp;
+
+ return result;
+ }
+
+ void* enqueueMapImage(
+ const Image& image,
+ cl_bool blocking,
+ cl_map_flags flags,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ size_type * row_pitch,
+ size_type * slice_pitch,
+ const vector<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL) const
+ {
+ cl_event tmp;
+ cl_int error;
+ void * result = ::clEnqueueMapImage(
+ object_, image(), blocking, flags,
+ origin.data(),
+ region.data(),
+ row_pitch, slice_pitch,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL,
+ &error);
+
+ detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ if (event != NULL && error == CL_SUCCESS)
+ *event = tmp;
+ return result;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /**
+ * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
+ * This variant takes a raw SVM pointer.
+ */
+ template<typename T>
+ cl_int enqueueMapSVM(
+ T* ptr,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(::clEnqueueSVMMap(
+ object_, blocking, flags, static_cast<void*>(ptr), size,
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MAP_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+
+ /**
+ * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
+ * This variant takes a cl::pointer instance.
+ */
+ template<typename T, class D>
+ cl_int enqueueMapSVM(
+ cl::pointer<T, D> &ptr,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(::clEnqueueSVMMap(
+ object_, blocking, flags, static_cast<void*>(ptr.get()), size,
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MAP_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer.
+ * This variant takes a cl::vector instance.
+ */
+ template<typename T, class Alloc>
+ cl_int enqueueMapSVM(
+ cl::vector<T, Alloc> &container,
+ cl_bool blocking,
+ cl_map_flags flags,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(::clEnqueueSVMMap(
+ object_, blocking, flags, static_cast<void*>(container.data()), container.size(),
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MAP_BUFFER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+ cl_int enqueueUnmapMemObject(
+ const Memory& memory,
+ void* mapped_ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueUnmapMemObject(
+ object_, memory(), mapped_ptr,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ /**
+ * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
+ * This variant takes a raw SVM pointer.
+ */
+ template<typename T>
+ cl_int enqueueUnmapSVM(
+ T* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueSVMUnmap(
+ object_, static_cast<void*>(ptr),
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
+ * This variant takes a cl::pointer instance.
+ */
+ template<typename T, class D>
+ cl_int enqueueUnmapSVM(
+ cl::pointer<T, D> &ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueSVMUnmap(
+ object_, static_cast<void*>(ptr.get()),
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ /**
+ * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime.
+ * This variant takes a cl::vector instance.
+ */
+ template<typename T, class Alloc>
+ cl_int enqueueUnmapSVM(
+ cl::vector<T, Alloc> &container,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueSVMUnmap(
+ object_, static_cast<void*>(container.data()),
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
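+
+    /* Usage sketch (illustrative only): a coarse-grained SVM container must be
+     * mapped before the host touches it and unmapped before the device uses it
+     * again. `svmVec` is assumed to be a cl::vector allocated with cl::SVMAllocator.
+     *
+     *     queue.enqueueMapSVM(svmVec, CL_TRUE, CL_MAP_WRITE);
+     *     for (auto &x : svmVec) x = 0;      // host-side update
+     *     queue.enqueueUnmapSVM(svmVec);     // hand the buffer back to the runtime
+     */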
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+    /**
+     * Enqueues a marker command which waits for either a list of events to complete,
+     * or all previously enqueued commands to complete.
+     *
+     * If the event list is empty, the marker waits for all commands previously enqueued
+     * in command_queue to complete before it completes. The command returns an event that
+     * can be waited on to ensure that either all events in the event_wait_list, or all
+     * commands queued to command_queue before this command, have completed.
+     */
+ cl_int enqueueMarkerWithWaitList(
+ const vector<Event> *events = 0,
+ Event *event = 0)
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueMarkerWithWaitList(
+ object_,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MARKER_WAIT_LIST_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+    /**
+     * A synchronization point that enqueues a barrier operation.
+     *
+     * The barrier waits for either a list of events to complete or, if the list is empty,
+     * for all commands previously enqueued in command_queue to complete before it completes.
+     * It also blocks execution: commands enqueued after the barrier do not execute until it
+     * completes. The command returns an event that can be waited on to ensure that either
+     * all events in the event_wait_list, or all commands queued to command_queue before
+     * this command, have completed.
+     */
+ cl_int enqueueBarrierWithWaitList(
+ const vector<Event> *events = 0,
+ Event *event = 0)
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueBarrierWithWaitList(
+ object_,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_BARRIER_WAIT_LIST_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
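+
+    /* Usage sketch (illustrative only): ordering work without blocking the host.
+     * `queue` is assumed to be this CommandQueue.
+     *
+     *     cl::Event marker;
+     *     queue.enqueueMarkerWithWaitList(NULL, &marker);  // marks work queued so far
+     *     queue.enqueueBarrierWithWaitList();              // later commands wait here
+     *     marker.wait();                                   // host waits for the mark
+     */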
+
+ /**
+ * Enqueues a command to indicate with which device a set of memory objects
+ * should be associated.
+ */
+ cl_int enqueueMigrateMemObjects(
+ const vector<Memory> &memObjects,
+ cl_mem_migration_flags flags,
+ const vector<Event>* events = NULL,
+ Event* event = NULL
+ )
+ {
+ cl_event tmp;
+
+ vector<cl_mem> localMemObjects(memObjects.size());
+
+        for( size_type i = 0; i < memObjects.size(); ++i ) {
+ localMemObjects[i] = memObjects[i]();
+ }
+
+
+ cl_int err = detail::errHandler(
+ ::clEnqueueMigrateMemObjects(
+ object_,
+ (cl_uint)memObjects.size(),
+ localMemObjects.data(),
+ flags,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+ cl_int enqueueNDRangeKernel(
+ const Kernel& kernel,
+ const NDRange& offset,
+ const NDRange& global,
+ const NDRange& local = NullRange,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueNDRangeKernel(
+ object_, kernel(), (cl_uint) global.dimensions(),
+ offset.dimensions() != 0 ? (const size_type*) offset : NULL,
+ (const size_type*) global,
+ local.dimensions() != 0 ? (const size_type*) local : NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_NDRANGE_KERNEL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
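+
+    /* Usage sketch (illustrative only): launch a 2D kernel. `kernel` is assumed
+     * to be a cl::Kernel whose arguments have already been set with setArg().
+     *
+     *     cl::Event done;
+     *     cl_int err = queue.enqueueNDRangeKernel(
+     *         kernel,
+     *         cl::NullRange,            // no global offset
+     *         cl::NDRange(1024, 1024),  // global work size
+     *         cl::NDRange(16, 16),      // work-group size (NullRange lets the runtime choose)
+     *         NULL, &done);
+     */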
+
+#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+ CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask(
+ const Kernel& kernel,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueTask(
+ object_, kernel(),
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_TASK_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+
+ cl_int enqueueNativeKernel(
+ void (CL_CALLBACK *userFptr)(void *),
+ std::pair<void*, size_type> args,
+ const vector<Memory>* mem_objects = NULL,
+ const vector<const void*>* mem_locs = NULL,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ size_type elements = 0;
+ if (mem_objects != NULL) {
+ elements = mem_objects->size();
+ }
+ vector<cl_mem> mems(elements);
+        for (size_type i = 0; i < elements; i++) {
+ mems[i] = ((*mem_objects)[i])();
+ }
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueNativeKernel(
+ object_, userFptr, args.first, args.second,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ mems.data(),
+ (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_NATIVE_KERNEL);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+/**
+ * OpenCL 1.1 APIs that were deprecated in OpenCL 1.2.
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueMarker(
+ object_,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_MARKER_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueWaitForEvents(const vector<Event>& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ return detail::errHandler(
+ ::clEnqueueWaitForEvents(
+ object_,
+ (cl_uint) events.size(),
+ events.size() > 0 ? (const cl_event*) &events.front() : NULL),
+ __ENQUEUE_WAIT_FOR_EVENTS_ERR);
+ }
+#endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+ cl_int enqueueAcquireGLObjects(
+ const vector<Memory>* mem_objects = NULL,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueAcquireGLObjects(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_ACQUIRE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReleaseGLObjects(
+ const vector<Memory>* mem_objects = NULL,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueReleaseGLObjects(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_RELEASE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+#if defined (CL_HPP_USE_DX_INTEROP)
+typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)(
+ cl_command_queue command_queue, cl_uint num_objects,
+ const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
+ const cl_event* event_wait_list, cl_event* event);
+typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)(
+ cl_command_queue command_queue, cl_uint num_objects,
+ const cl_mem* mem_objects, cl_uint num_events_in_wait_list,
+ const cl_event* event_wait_list, cl_event* event);
+
+ cl_int enqueueAcquireD3D10Objects(
+ const vector<Memory>* mem_objects = NULL,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ cl_context context = getInfo<CL_QUEUE_CONTEXT>();
+ cl::Device device(getInfo<CL_QUEUE_DEVICE>());
+ cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
+ CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR);
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
+ CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR);
+#endif
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ pfn_clEnqueueAcquireD3D10ObjectsKHR(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+                (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_ACQUIRE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+
+ cl_int enqueueReleaseD3D10Objects(
+ const vector<Memory>* mem_objects = NULL,
+ const vector<Event>* events = NULL,
+ Event* event = NULL) const
+ {
+ static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+ cl_context context = getInfo<CL_QUEUE_CONTEXT>();
+ cl::Device device(getInfo<CL_QUEUE_DEVICE>());
+ cl_platform_id platform = device.getInfo<CL_DEVICE_PLATFORM>();
+ CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR);
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
+ CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR);
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ pfn_clEnqueueReleaseD3D10ObjectsKHR(
+ object_,
+ (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0,
+ (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL,
+ (events != NULL) ? (cl_uint) events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_RELEASE_GL_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+ }
+#endif
+
+/**
+ * OpenCL 1.1 APIs that were deprecated in OpenCL 1.2.
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+ CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ {
+ return detail::errHandler(
+ ::clEnqueueBarrier(object_),
+ __ENQUEUE_BARRIER_ERR);
+ }
+#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS
+
+ cl_int flush() const
+ {
+ return detail::errHandler(::clFlush(object_), __FLUSH_ERR);
+ }
+
+ cl_int finish() const
+ {
+ return detail::errHandler(::clFinish(object_), __FINISH_ERR);
+ }
+}; // CommandQueue
+
+CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_;
+CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_;
+CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS;
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+enum class DeviceQueueProperties : cl_command_queue_properties
+{
+ None = 0,
+ Profiling = CL_QUEUE_PROFILING_ENABLE,
+};
+
+inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs)
+{
+ return static_cast<DeviceQueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
+}
+
+/*! \class DeviceCommandQueue
+ * \brief DeviceCommandQueue interface for device cl_command_queues.
+ */
+class DeviceCommandQueue : public detail::Wrapper<cl_command_queue>
+{
+public:
+
+ /*!
+ * Trivial empty constructor to create a null queue.
+ */
+ DeviceCommandQueue() { }
+
+ /*!
+ * Default construct device command queue on default context and device
+ */
+ DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL)
+ {
+ cl_int error;
+ cl::Context context = cl::Context::getDefault();
+ cl::Device device = cl::Device::getDefault();
+
+ cl_command_queue_properties mergedProperties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
+
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, mergedProperties, 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*!
+ * Create a device command queue for a specified device in the passed context.
+ */
+ DeviceCommandQueue(
+ const Context& context,
+ const Device& device,
+ DeviceQueueProperties properties = DeviceQueueProperties::None,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ cl_command_queue_properties mergedProperties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, mergedProperties, 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*!
+ * Create a device command queue for a specified device in the passed context.
+ */
+ DeviceCommandQueue(
+ const Context& context,
+ const Device& device,
+ cl_uint queueSize,
+ DeviceQueueProperties properties = DeviceQueueProperties::None,
+ cl_int* err = NULL)
+ {
+ cl_int error;
+
+ cl_command_queue_properties mergedProperties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast<cl_command_queue_properties>(properties);
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, mergedProperties,
+ CL_QUEUE_SIZE, queueSize,
+ 0 };
+ object_ = ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error);
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+
+ /*! \brief Constructor from cl_command_queue - takes ownership.
+ *
+ * \param retainObject will cause the constructor to retain its cl object.
+ * Defaults to false to maintain compatibility with
+ * earlier versions.
+ */
+ explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) :
+ detail::Wrapper<cl_type>(commandQueue, retainObject) { }
+
+ DeviceCommandQueue& operator = (const cl_command_queue& rhs)
+ {
+ detail::Wrapper<cl_type>::operator=(rhs);
+ return *this;
+ }
+
+ /*! \brief Copy constructor to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper<cl_type>(queue) {}
+
+ /*! \brief Copy assignment to forward copy to the superclass correctly.
+ * Required for MSVC.
+ */
+ DeviceCommandQueue& operator = (const DeviceCommandQueue &queue)
+ {
+ detail::Wrapper<cl_type>::operator=(queue);
+ return *this;
+ }
+
+ /*! \brief Move constructor to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(queue)) {}
+
+ /*! \brief Move assignment to forward move to the superclass correctly.
+ * Required for MSVC.
+ */
+ DeviceCommandQueue& operator = (DeviceCommandQueue &&queue)
+ {
+ detail::Wrapper<cl_type>::operator=(std::move(queue));
+ return *this;
+ }
+
+ template <typename T>
+ cl_int getInfo(cl_command_queue_info name, T* param) const
+ {
+ return detail::errHandler(
+ detail::getInfo(
+ &::clGetCommandQueueInfo, object_, name, param),
+ __GET_COMMAND_QUEUE_INFO_ERR);
+ }
+
+ template <cl_int name> typename
+ detail::param_traits<detail::cl_command_queue_info, name>::param_type
+ getInfo(cl_int* err = NULL) const
+ {
+ typename detail::param_traits<
+ detail::cl_command_queue_info, name>::param_type param;
+ cl_int result = getInfo(name, &param);
+ if (err != NULL) {
+ *err = result;
+ }
+ return param;
+ }
+
+ /*!
+ * Create a new default device command queue for the default device,
+ * in the default context and of the default size.
+ * If there is already a default queue for the specified device this
+ * function will return the pre-existing queue.
+ */
+ static DeviceCommandQueue makeDefault(
+ cl_int *err = nullptr)
+ {
+ cl_int error;
+ cl::Context context = cl::Context::getDefault();
+ cl::Device device = cl::Device::getDefault();
+
+ cl_command_queue_properties properties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties,
+ 0 };
+ DeviceCommandQueue deviceQueue(
+ ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error));
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ return deviceQueue;
+ }
+
+ /*!
+ * Create a new default device command queue for the specified device
+ * and of the default size.
+ * If there is already a default queue for the specified device this
+ * function will return the pre-existing queue.
+ */
+ static DeviceCommandQueue makeDefault(
+ const Context &context, const Device &device, cl_int *err = nullptr)
+ {
+ cl_int error;
+
+ cl_command_queue_properties properties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties,
+ 0 };
+ DeviceCommandQueue deviceQueue(
+ ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error));
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ return deviceQueue;
+ }
+
+ /*!
+ * Create a new default device command queue for the specified device
+ * and of the requested size in bytes.
+ * If there is already a default queue for the specified device this
+ * function will return the pre-existing queue.
+ */
+ static DeviceCommandQueue makeDefault(
+ const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr)
+ {
+ cl_int error;
+
+ cl_command_queue_properties properties =
+ CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT;
+ cl_queue_properties queue_properties[] = {
+ CL_QUEUE_PROPERTIES, properties,
+ CL_QUEUE_SIZE, queueSize,
+ 0 };
+ DeviceCommandQueue deviceQueue(
+ ::clCreateCommandQueueWithProperties(
+ context(), device(), queue_properties, &error));
+
+ detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ return deviceQueue;
+ }
+}; // DeviceCommandQueue
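+
+/* Usage sketch (illustrative only): create the default on-device queue that
+ * device-side enqueue will use. `context` and `device` are assumed to be a
+ * valid cl::Context and cl::Device supporting OpenCL 2.0 device queues.
+ *
+ *     cl_int err;
+ *     cl::DeviceCommandQueue devQueue =
+ *         cl::DeviceCommandQueue::makeDefault(context, device, &err);
+ */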
+
+namespace detail
+{
+ // Specialization for device command queue
+ template <>
+ struct KernelArgumentHandler<cl::DeviceCommandQueue, void>
+ {
+ static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); }
+ static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); }
+ };
+} // namespace detail
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+template< typename IteratorType >
+Buffer::Buffer(
+ const Context &context,
+ IteratorType startIterator,
+ IteratorType endIterator,
+ bool readOnly,
+ bool useHostPtr,
+ cl_int* err)
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ cl_mem_flags flags = 0;
+ if( readOnly ) {
+ flags |= CL_MEM_READ_ONLY;
+ }
+ else {
+ flags |= CL_MEM_READ_WRITE;
+ }
+ if( useHostPtr ) {
+ flags |= CL_MEM_USE_HOST_PTR;
+ }
+
+ size_type size = sizeof(DataType)*(endIterator - startIterator);
+
+ if( useHostPtr ) {
+ object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
+ } else {
+ object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
+ }
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ if( !useHostPtr ) {
+ CommandQueue queue(context, 0, &error);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ error = cl::copy(queue, startIterator, endIterator, *this);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+}
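+
+/* Usage sketch (illustrative only): build a device buffer directly from host
+ * iterators; the data is copied through a temporary queue on `context`.
+ * `context` is assumed to be a valid cl::Context.
+ *
+ *     std::vector<float> host(1024, 1.0f);
+ *     cl_int err;
+ *     cl::Buffer buf(context, host.begin(), host.end(),
+ *                    false,   // readOnly = false (read-write)
+ *                    false,   // useHostPtr = false (copy the data)
+ *                    &err);
+ */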
+
+template< typename IteratorType >
+Buffer::Buffer(
+ const CommandQueue &queue,
+ IteratorType startIterator,
+ IteratorType endIterator,
+ bool readOnly,
+ bool useHostPtr,
+ cl_int* err)
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ cl_mem_flags flags = 0;
+ if (readOnly) {
+ flags |= CL_MEM_READ_ONLY;
+ }
+ else {
+ flags |= CL_MEM_READ_WRITE;
+ }
+ if (useHostPtr) {
+ flags |= CL_MEM_USE_HOST_PTR;
+ }
+
+ size_type size = sizeof(DataType)*(endIterator - startIterator);
+
+ Context context = queue.getInfo<CL_QUEUE_CONTEXT>();
+
+ if (useHostPtr) {
+ object_ = ::clCreateBuffer(context(), flags, size, static_cast<DataType*>(&*startIterator), &error);
+ }
+ else {
+ object_ = ::clCreateBuffer(context(), flags, size, 0, &error);
+ }
+
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+ if (!useHostPtr) {
+ error = cl::copy(queue, startIterator, endIterator, *this);
+ detail::errHandler(error, __CREATE_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+ }
+}
+
+inline cl_int enqueueReadBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ size_type offset,
+ size_type size,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event);
+}
+
+inline cl_int enqueueWriteBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ size_type offset,
+ size_type size,
+ const void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event);
+}
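+
+/* Usage sketch (illustrative only): the free functions mirror the CommandQueue
+ * members but run on the default queue. `buf` is assumed to be a cl::Buffer
+ * large enough for the data.
+ *
+ *     std::vector<int> data(256, 7);
+ *     cl_int err = cl::enqueueWriteBuffer(buf, CL_TRUE, 0,
+ *                                         data.size() * sizeof(int), data.data());
+ *     if (err == CL_SUCCESS)
+ *         err = cl::enqueueReadBuffer(buf, CL_TRUE, 0,
+ *                                     data.size() * sizeof(int), data.data());
+ */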
+
+inline void* enqueueMapBuffer(
+ const Buffer& buffer,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type offset,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL,
+ cl_int* err = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (err != NULL) {
+ *err = error;
+ }
+
+    cl_event tmp;
+    void * result = ::clEnqueueMapBuffer(
+            queue(), buffer(), blocking, flags, offset, size,
+            (events != NULL) ? (cl_uint) events->size() : 0,
+            (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL,
+            (event != NULL) ? &tmp : NULL,
+            &error);
+
+    detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+    if (err != NULL) {
+        *err = error;
+    }
+    if (event != NULL && error == CL_SUCCESS)
+        *event = tmp;
+    return result;
+}
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/**
+ * Enqueues to the default queue a command that will allow the host to
+ * update a region of a coarse-grained SVM buffer.
+ * This variant takes a raw SVM pointer.
+ */
+template<typename T>
+inline cl_int enqueueMapSVM(
+ T* ptr,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type size,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ }
+
+ return queue.enqueueMapSVM(
+ ptr, blocking, flags, size, events, event);
+}
+
+/**
+ * Enqueues to the default queue a command that will allow the host to
+ * update a region of a coarse-grained SVM buffer.
+ * This variant takes a cl::pointer instance.
+ */
+template<typename T, class D>
+inline cl_int enqueueMapSVM(
+    cl::pointer<T, D> &ptr,
+ cl_bool blocking,
+ cl_map_flags flags,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ }
+
+ return queue.enqueueMapSVM(
+ ptr, blocking, flags, size, events, event);
+}
+
+/**
+ * Enqueues to the default queue a command that will allow the host to
+ * update a region of a coarse-grained SVM buffer.
+ * This variant takes a cl::vector instance.
+ */
+template<typename T, class Alloc>
+inline cl_int enqueueMapSVM(
+    cl::vector<T, Alloc> &container,
+ cl_bool blocking,
+ cl_map_flags flags,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ }
+
+ return queue.enqueueMapSVM(
+ container, blocking, flags, events, event);
+}
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+inline cl_int enqueueUnmapMemObject(
+ const Memory& memory,
+ void* mapped_ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR);
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ cl_event tmp;
+ cl_int err = detail::errHandler(
+ ::clEnqueueUnmapMemObject(
+ queue(), memory(), mapped_ptr,
+ (events != NULL) ? (cl_uint)events->size() : 0,
+ (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL,
+ (event != NULL) ? &tmp : NULL),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+ if (event != NULL && err == CL_SUCCESS)
+ *event = tmp;
+
+ return err;
+}
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/**
+ * Enqueues to the default queue a command that will release a coarse-grained
+ * SVM buffer back to the OpenCL runtime.
+ * This variant takes a raw SVM pointer.
+ */
+template<typename T>
+inline cl_int enqueueUnmapSVM(
+ T* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+ }
+
+ return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+
+}
+
+/**
+ * Enqueues to the default queue a command that will release a coarse-grained
+ * SVM buffer back to the OpenCL runtime.
+ * This variant takes a cl::pointer instance.
+ */
+template<typename T, class D>
+inline cl_int enqueueUnmapSVM(
+ cl::pointer<T, D> &ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+ }
+
+ return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+}
+
+/**
+ * Enqueues to the default queue a command that will release a coarse-grained
+ * SVM buffer back to the OpenCL runtime.
+ * This variant takes a cl::vector instance.
+ */
+template<typename T, class Alloc>
+inline cl_int enqueueUnmapSVM(
+ cl::vector<T, Alloc> &container,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS) {
+ return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+ }
+
+ return detail::errHandler(queue.enqueueUnmapSVM(container, events, event),
+ __ENQUEUE_UNMAP_MEM_OBJECT_ERR);
+}
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+inline cl_int enqueueCopyBuffer(
+ const Buffer& src,
+ const Buffer& dst,
+ size_type src_offset,
+ size_type dst_offset,
+ size_type size,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Host to Device.
+ * Uses default command queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS)
+ return error;
+
+ return cl::copy(queue, startIterator, endIterator, buffer);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Device to Host.
+ * Uses default command queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+ if (error != CL_SUCCESS)
+ return error;
+
+ return cl::copy(queue, buffer, startIterator, endIterator);
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Host to Device.
+ * Uses specified queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer )
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ size_type length = endIterator-startIterator;
+ size_type byteLength = length*sizeof(DataType);
+
+ DataType *pointer =
+ static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error));
+ // if exceptions enabled, enqueueMapBuffer will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+#if defined(_MSC_VER)
+ std::copy(
+ startIterator,
+ endIterator,
+ stdext::checked_array_iterator<DataType*>(
+ pointer, length));
+#else
+ std::copy(startIterator, endIterator, pointer);
+#endif
+ Event endEvent;
+ error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
+ // if exceptions enabled, enqueueUnmapMemObject will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ endEvent.wait();
+ return CL_SUCCESS;
+}
+
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Device to Host.
+ * Uses specified queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
+{
+ typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+ cl_int error;
+
+ size_type length = endIterator-startIterator;
+ size_type byteLength = length*sizeof(DataType);
+
+ DataType *pointer =
+ static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
+ // if exceptions enabled, enqueueMapBuffer will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ std::copy(pointer, pointer + length, startIterator);
+ Event endEvent;
+ error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
+ // if exceptions enabled, enqueueUnmapMemObject will throw
+ if( error != CL_SUCCESS ) {
+ return error;
+ }
+ endEvent.wait();
+ return CL_SUCCESS;
+}
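+
+/* Usage sketch (illustrative only): round-trip a host vector through a device
+ * buffer with the blocking copy helpers. `queue` and `buf` are assumed to be a
+ * valid cl::CommandQueue and a cl::Buffer of at least n floats.
+ *
+ *     std::vector<float> in(n, 2.0f), out(n);
+ *     cl::copy(queue, in.begin(), in.end(), buf);    // host -> device
+ *     cl::copy(queue, buf, out.begin(), out.end());  // device -> host
+ */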
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/**
+ * Blocking SVM map operation - performs a blocking map underneath.
+ */
+template<typename T, class Alloc>
+inline cl_int mapSVM(cl::vector<T, Alloc> &container)
+{
+ return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
+}
+
+/**
+ * Blocking SVM unmap operation - performs a blocking unmap underneath.
+ */
+template<typename T, class Alloc>
+inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
+{
+ return enqueueUnmapSVM(container);
+}
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
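+
+/* Usage sketch (illustrative only): the blocking convenience wrappers pair up
+ * around host-side access to an SVM container. `svmVec` is assumed to be a
+ * cl::vector allocated with cl::SVMAllocator.
+ *
+ *     cl::mapSVM(svmVec);     // blocking map for read/write on the default queue
+ *     svmVec[0] = 42;
+ *     cl::unmapSVM(svmVec);   // blocking unmap
+ */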
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+inline cl_int enqueueReadBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const array<size_type, 3>& buffer_offset,
+ const array<size_type, 3>& host_offset,
+ const array<size_type, 3>& region,
+ size_type buffer_row_pitch,
+ size_type buffer_slice_pitch,
+ size_type host_row_pitch,
+ size_type host_slice_pitch,
+ void *ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadBufferRect(
+ buffer,
+ blocking,
+ buffer_offset,
+ host_offset,
+ region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ events,
+ event);
+}
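+
+/* Usage sketch (illustrative only): read a sub-rectangle of a pitched 2D layout
+ * stored in `buf` into a tightly packed host array. Offsets and widths along
+ * dimension 0 are in bytes; host pitches of 0 mean "derive from the region".
+ * `buf`, `hostPtr`, `x`, `y`, `w`, `h` and `rowPitch` are assumed.
+ *
+ *     cl::enqueueReadBufferRect(
+ *         buf, CL_TRUE,
+ *         {{x, y, 0}},        // buffer origin (bytes, rows, slices)
+ *         {{0, 0, 0}},        // host origin
+ *         {{w, h, 1}},        // region
+ *         rowPitch, 0,        // buffer row/slice pitch in bytes
+ *         0, 0,               // host row/slice pitch (tightly packed)
+ *         hostPtr);
+ */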
+
+inline cl_int enqueueWriteBufferRect(
+ const Buffer& buffer,
+ cl_bool blocking,
+ const array<size_type, 3>& buffer_offset,
+ const array<size_type, 3>& host_offset,
+ const array<size_type, 3>& region,
+ size_type buffer_row_pitch,
+ size_type buffer_slice_pitch,
+ size_type host_row_pitch,
+ size_type host_slice_pitch,
+ void *ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteBufferRect(
+ buffer,
+ blocking,
+ buffer_offset,
+ host_offset,
+ region,
+ buffer_row_pitch,
+ buffer_slice_pitch,
+ host_row_pitch,
+ host_slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyBufferRect(
+ const Buffer& src,
+ const Buffer& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ size_type src_row_pitch,
+ size_type src_slice_pitch,
+ size_type dst_row_pitch,
+ size_type dst_slice_pitch,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBufferRect(
+ src,
+ dst,
+ src_origin,
+ dst_origin,
+ region,
+ src_row_pitch,
+ src_slice_pitch,
+ dst_row_pitch,
+ dst_slice_pitch,
+ events,
+ event);
+}
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+
+inline cl_int enqueueReadImage(
+ const Image& image,
+ cl_bool blocking,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ size_type row_pitch,
+ size_type slice_pitch,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueReadImage(
+ image,
+ blocking,
+ origin,
+ region,
+ row_pitch,
+ slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueWriteImage(
+ const Image& image,
+ cl_bool blocking,
+ const array<size_type, 3>& origin,
+ const array<size_type, 3>& region,
+ size_type row_pitch,
+ size_type slice_pitch,
+ void* ptr,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueWriteImage(
+ image,
+ blocking,
+ origin,
+ region,
+ row_pitch,
+ slice_pitch,
+ ptr,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyImage(
+ const Image& src,
+ const Image& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyImage(
+ src,
+ dst,
+ src_origin,
+ dst_origin,
+ region,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyImageToBuffer(
+ const Image& src,
+ const Buffer& dst,
+ const array<size_type, 3>& src_origin,
+ const array<size_type, 3>& region,
+ size_type dst_offset,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyImageToBuffer(
+ src,
+ dst,
+ src_origin,
+ region,
+ dst_offset,
+ events,
+ event);
+}
+
+inline cl_int enqueueCopyBufferToImage(
+ const Buffer& src,
+ const Image& dst,
+ size_type src_offset,
+ const array<size_type, 3>& dst_origin,
+ const array<size_type, 3>& region,
+ const vector<Event>* events = NULL,
+ Event* event = NULL)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.enqueueCopyBufferToImage(
+ src,
+ dst,
+ src_offset,
+ dst_origin,
+ region,
+ events,
+ event);
+}
+
+
+inline cl_int flush(void)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.flush();
+}
+
+inline cl_int finish(void)
+{
+ cl_int error;
+ CommandQueue queue = CommandQueue::getDefault(&error);
+
+ if (error != CL_SUCCESS) {
+ return error;
+ }
+
+ return queue.finish();
+}
+
+class EnqueueArgs
+{
+private:
+ CommandQueue queue_;
+ const NDRange offset_;
+ const NDRange global_;
+ const NDRange local_;
+ vector<Event> events_;
+
+ template<typename... Ts>
+ friend class KernelFunctor;
+
+public:
+ EnqueueArgs(NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+
+ }
+
+ EnqueueArgs(NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(Event e, NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(Event e, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(const vector<Event> &events, NDRange global) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(NullRange),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
+ queue_(CommandQueue::getDefault()),
+ offset_(offset),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local)
+ {
+ events_.push_back(e);
+ }
+
+ EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(NullRange),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(NullRange),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+
+ EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
+ queue_(queue),
+ offset_(offset),
+ global_(global),
+ local_(local),
+ events_(events)
+ {
+
+ }
+};
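+
+/* Usage sketch (illustrative only): EnqueueArgs bundles the launch parameters
+ * that a KernelFunctor call needs. `queue` and `previousEvent` are assumed.
+ *
+ *     cl::EnqueueArgs args(queue, cl::NDRange(4096), cl::NDRange(64));
+ *     cl::EnqueueArgs chained(previousEvent, cl::NDRange(4096));  // default queue
+ */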
+
+
+//----------------------------------------------------------------------------------------------
+
+
+/**
+ * Type safe kernel functor.
+ *
+ */
+template<typename... Ts>
+class KernelFunctor
+{
+private:
+ Kernel kernel_;
+
+ template<int index, typename T0, typename... T1s>
+ void setArgs(T0&& t0, T1s&&... t1s)
+ {
+ kernel_.setArg(index, t0);
+ setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
+ }
+
+ template<int index, typename T0>
+ void setArgs(T0&& t0)
+ {
+ kernel_.setArg(index, t0);
+ }
+
+ template<int index>
+ void setArgs()
+ {
+ }
+
+
+public:
+ KernelFunctor(Kernel kernel) : kernel_(kernel)
+ {}
+
+ KernelFunctor(
+ const Program& program,
+ const string name,
+ cl_int * err = NULL) :
+ kernel_(program, name.c_str(), err)
+ {}
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ /**
+ * Enqueue kernel.
+ * @param args Launch parameters of the kernel.
+ * @param t0... List of kernel arguments based on the template type of the functor.
+ */
+ Event operator() (
+ const EnqueueArgs& args,
+ Ts... ts)
+ {
+ Event event;
+ setArgs<0>(std::forward<Ts>(ts)...);
+
+ args.queue_.enqueueNDRangeKernel(
+ kernel_,
+ args.offset_,
+ args.global_,
+ args.local_,
+ &args.events_,
+ &event);
+
+ return event;
+ }
+
+ /**
+ * Enqueue kernel with support for error code.
+ * @param args Launch parameters of the kernel.
+ * @param t0... List of kernel arguments based on the template type of the functor.
+ * @param error Out parameter returning the error code from the execution.
+ */
+ Event operator() (
+ const EnqueueArgs& args,
+ Ts... ts,
+ cl_int &error)
+ {
+ Event event;
+ setArgs<0>(std::forward<Ts>(ts)...);
+
+ error = args.queue_.enqueueNDRangeKernel(
+ kernel_,
+ args.offset_,
+ args.global_,
+ args.local_,
+ &args.events_,
+ &event);
+
+ return event;
+ }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+ cl_int setSVMPointers(const vector<void*> &pointerList)
+ {
+ return kernel_.setSVMPointers(pointerList);
+ }
+
+ template<typename T0, typename... T1s>
+ cl_int setSVMPointers(const T0 &t0, T1s... ts)
+ {
+ return kernel_.setSVMPointers(t0, ts...);
+ }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+ Kernel getKernel()
+ {
+ return kernel_;
+ }
+};
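+
+/* Usage sketch (illustrative only): bind a kernel signature once, then launch
+ * it like a function. `program`, `queue`, the kernel name "vector_add" and the
+ * buffers `a`, `b`, `c` are assumed.
+ *
+ *     auto vadd = cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer, int>(
+ *         program, "vector_add");
+ *     cl::Event e = vadd(cl::EnqueueArgs(queue, cl::NDRange(n)), a, b, c, (int)n);
+ *     e.wait();
+ */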
+
+namespace compatibility {
+ /**
+ * Backward compatibility class to ensure that cl.hpp code works with cl2.hpp.
+ * Please use KernelFunctor directly.
+ */
+ template<typename... Ts>
+ struct make_kernel
+ {
+ typedef KernelFunctor<Ts...> FunctorType;
+
+ FunctorType functor_;
+
+ make_kernel(
+ const Program& program,
+ const string name,
+ cl_int * err = NULL) :
+ functor_(FunctorType(program, name, err))
+ {}
+
+ make_kernel(
+ const Kernel kernel) :
+ functor_(FunctorType(kernel))
+ {}
+
+ //! \brief Return type of the functor
+ typedef Event result_type;
+
+ //! \brief Function signature of kernel functor with no event dependency.
+ typedef Event type_(
+ const EnqueueArgs&,
+ Ts...);
+
+ Event operator()(
+ const EnqueueArgs& enqueueArgs,
+ Ts... args)
+ {
+ return functor_(
+ enqueueArgs, args...);
+ }
+ };
+} // namespace compatibility
+
+
+//----------------------------------------------------------------------------------------------------------------------
+
+#undef CL_HPP_ERR_STR_
+#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS)
+#undef __GET_DEVICE_INFO_ERR
+#undef __GET_PLATFORM_INFO_ERR
+#undef __GET_DEVICE_IDS_ERR
+#undef __GET_CONTEXT_INFO_ERR
+#undef __GET_EVENT_INFO_ERR
+#undef __GET_EVENT_PROFILE_INFO_ERR
+#undef __GET_MEM_OBJECT_INFO_ERR
+#undef __GET_IMAGE_INFO_ERR
+#undef __GET_SAMPLER_INFO_ERR
+#undef __GET_KERNEL_INFO_ERR
+#undef __GET_KERNEL_ARG_INFO_ERR
+#undef __GET_KERNEL_WORK_GROUP_INFO_ERR
+#undef __GET_PROGRAM_INFO_ERR
+#undef __GET_PROGRAM_BUILD_INFO_ERR
+#undef __GET_COMMAND_QUEUE_INFO_ERR
+
+#undef __CREATE_CONTEXT_ERR
+#undef __CREATE_CONTEXT_FROM_TYPE_ERR
+#undef __GET_SUPPORTED_IMAGE_FORMATS_ERR
+
+#undef __CREATE_BUFFER_ERR
+#undef __CREATE_SUBBUFFER_ERR
+#undef __CREATE_IMAGE2D_ERR
+#undef __CREATE_IMAGE3D_ERR
+#undef __CREATE_SAMPLER_ERR
+#undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR
+
+#undef __CREATE_USER_EVENT_ERR
+#undef __SET_USER_EVENT_STATUS_ERR
+#undef __SET_EVENT_CALLBACK_ERR
+#undef __SET_PRINTF_CALLBACK_ERR
+
+#undef __WAIT_FOR_EVENTS_ERR
+
+#undef __CREATE_KERNEL_ERR
+#undef __SET_KERNEL_ARGS_ERR
+#undef __CREATE_PROGRAM_WITH_SOURCE_ERR
+#undef __CREATE_PROGRAM_WITH_BINARY_ERR
+#undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR
+#undef __BUILD_PROGRAM_ERR
+#undef __CREATE_KERNELS_IN_PROGRAM_ERR
+
+#undef __CREATE_COMMAND_QUEUE_ERR
+#undef __SET_COMMAND_QUEUE_PROPERTY_ERR
+#undef __ENQUEUE_READ_BUFFER_ERR
+#undef __ENQUEUE_WRITE_BUFFER_ERR
+#undef __ENQUEUE_READ_BUFFER_RECT_ERR
+#undef __ENQUEUE_WRITE_BUFFER_RECT_ERR
+#undef __ENQEUE_COPY_BUFFER_ERR
+#undef __ENQEUE_COPY_BUFFER_RECT_ERR
+#undef __ENQUEUE_READ_IMAGE_ERR
+#undef __ENQUEUE_WRITE_IMAGE_ERR
+#undef __ENQUEUE_COPY_IMAGE_ERR
+#undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR
+#undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR
+#undef __ENQUEUE_MAP_BUFFER_ERR
+#undef __ENQUEUE_MAP_IMAGE_ERR
+#undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR
+#undef __ENQUEUE_NDRANGE_KERNEL_ERR
+#undef __ENQUEUE_TASK_ERR
+#undef __ENQUEUE_NATIVE_KERNEL
+
+#undef __UNLOAD_COMPILER_ERR
+#undef __CREATE_SUB_DEVICES_ERR
+
+#undef __CREATE_PIPE_ERR
+#undef __GET_PIPE_INFO_ERR
+
+#endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS
+
+// Extensions
+#undef CL_HPP_INIT_CL_EXT_FCN_PTR_
+#undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_
+
+#if defined(CL_HPP_USE_CL_DEVICE_FISSION)
+#undef CL_HPP_PARAM_NAME_DEVICE_FISSION_
+#endif // CL_HPP_USE_CL_DEVICE_FISSION
+
+#undef CL_HPP_NOEXCEPT_
+#undef CL_HPP_DEFINE_STATIC_MEMBER_
+
+} // namespace cl
+
+#endif // CL_HPP_
diff --git a/include/CL/cl_d3d10.h b/include/CL/cl_d3d10.h
new file mode 100644
index 0000000000..81b0d37214
--- /dev/null
+++ b/include/CL/cl_d3d10.h
@@ -0,0 +1,126 @@
+/**********************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+#ifndef __OPENCL_CL_D3D10_H
+#define __OPENCL_CL_D3D10_H
+
+#include <d3d10.h>
+#include <CL/cl.h>
+#include <CL/cl_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+ * cl_khr_d3d10_sharing */
+#define cl_khr_d3d10_sharing 1
+
+typedef cl_uint cl_d3d10_device_source_khr;
+typedef cl_uint cl_d3d10_device_set_khr;
+
+/******************************************************************************/
+
+// Error Codes
+#define CL_INVALID_D3D10_DEVICE_KHR -1002
+#define CL_INVALID_D3D10_RESOURCE_KHR -1003
+#define CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR -1004
+#define CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR -1005
+
+// cl_d3d10_device_source_nv
+#define CL_D3D10_DEVICE_KHR 0x4010
+#define CL_D3D10_DXGI_ADAPTER_KHR 0x4011
+
+// cl_d3d10_device_set_nv
+#define CL_PREFERRED_DEVICES_FOR_D3D10_KHR 0x4012
+#define CL_ALL_DEVICES_FOR_D3D10_KHR 0x4013
+
+// cl_context_info
+#define CL_CONTEXT_D3D10_DEVICE_KHR 0x4014
+#define CL_CONTEXT_D3D10_PREFER_SHARED_RESOURCES_KHR 0x402C
+
+// cl_mem_info
+#define CL_MEM_D3D10_RESOURCE_KHR 0x4015
+
+// cl_image_info
+#define CL_IMAGE_D3D10_SUBRESOURCE_KHR 0x4016
+
+// cl_command_type
+#define CL_COMMAND_ACQUIRE_D3D10_OBJECTS_KHR 0x4017
+#define CL_COMMAND_RELEASE_D3D10_OBJECTS_KHR 0x4018
+
+/******************************************************************************/
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromD3D10KHR_fn)(
+ cl_platform_id platform,
+ cl_d3d10_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d10_device_set_khr d3d_device_set,
+ cl_uint num_entries,
+ cl_device_id * devices,
+ cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10BufferKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D10Buffer * resource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10Texture2DKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D10Texture2D * resource,
+ UINT subresource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10Texture3DKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D10Texture3D * resource,
+ UINT subresource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireD3D10ObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseD3D10ObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_0;
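+
+/* Usage sketch (illustrative only): these _fn typedefs are meant for the address
+ * returned by clGetExtensionFunctionAddressForPlatform (OpenCL 1.2+). `platform`
+ * is assumed to be a cl_platform_id whose devices expose cl_khr_d3d10_sharing.
+ *
+ *     clGetDeviceIDsFromD3D10KHR_fn pfnGetDeviceIDsFromD3D10KHR =
+ *         (clGetDeviceIDsFromD3D10KHR_fn) clGetExtensionFunctionAddressForPlatform(
+ *             platform, "clGetDeviceIDsFromD3D10KHR");
+ *     if (pfnGetDeviceIDsFromD3D10KHR) { ... use it ... }
+ */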
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __OPENCL_CL_D3D10_H
+
diff --git a/include/CL/cl_d3d11.h b/include/CL/cl_d3d11.h
new file mode 100644
index 0000000000..d3c8bdc2b1
--- /dev/null
+++ b/include/CL/cl_d3d11.h
@@ -0,0 +1,126 @@
+/**********************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+#ifndef __OPENCL_CL_D3D11_H
+#define __OPENCL_CL_D3D11_H
+
+#include <d3d11.h>
+#include <CL/cl.h>
+#include <CL/cl_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+ * cl_khr_d3d11_sharing */
+#define cl_khr_d3d11_sharing 1
+
+typedef cl_uint cl_d3d11_device_source_khr;
+typedef cl_uint cl_d3d11_device_set_khr;
+
+/******************************************************************************/
+
+// Error Codes
+#define CL_INVALID_D3D11_DEVICE_KHR -1006
+#define CL_INVALID_D3D11_RESOURCE_KHR -1007
+#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008
+#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009
+
+// cl_d3d11_device_source
+#define CL_D3D11_DEVICE_KHR 0x4019
+#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A
+
+// cl_d3d11_device_set
+#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B
+#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C
+
+// cl_context_info
+#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D
+#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D
+
+// cl_mem_info
+#define CL_MEM_D3D11_RESOURCE_KHR 0x401E
+
+// cl_image_info
+#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F
+
+// cl_command_type
+#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020
+#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021
+
+/******************************************************************************/
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromD3D11KHR_fn)(
+ cl_platform_id platform,
+ cl_d3d11_device_source_khr d3d_device_source,
+ void * d3d_object,
+ cl_d3d11_device_set_khr d3d_device_set,
+ cl_uint num_entries,
+ cl_device_id * devices,
+ cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11BufferKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D11Buffer * resource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11Texture2DKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D11Texture2D * resource,
+ UINT subresource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11Texture3DKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ ID3D11Texture3D * resource,
+ UINT subresource,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireD3D11ObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseD3D11ObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_2;
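+
+/* These entry points are exported by individual drivers rather than by the
+ * ICD loader, which is why the *_fn pointer typedefs above exist. A minimal
+ * sketch of fetching one at run time, assuming a `platform` that reports
+ * cl_khr_d3d11_sharing and an existing ID3D11Device `d3d11_device`
+ * (error handling omitted):
+ *
+ *   clGetDeviceIDsFromD3D11KHR_fn pfn = (clGetDeviceIDsFromD3D11KHR_fn)
+ *       clGetExtensionFunctionAddressForPlatform(platform,
+ *                                                "clGetDeviceIDsFromD3D11KHR");
+ *   cl_device_id device;
+ *   if (pfn)
+ *       pfn(platform, CL_D3D11_DEVICE_KHR, d3d11_device,
+ *           CL_PREFERRED_DEVICES_FOR_D3D11_KHR, 1, &device, NULL);
+ */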
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __OPENCL_CL_D3D11_H
+
diff --git a/include/CL/cl_dx9_media_sharing.h b/include/CL/cl_dx9_media_sharing.h
new file mode 100644
index 0000000000..1ef543a5af
--- /dev/null
+++ b/include/CL/cl_dx9_media_sharing.h
@@ -0,0 +1,127 @@
+/**********************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+#ifndef __OPENCL_CL_DX9_MEDIA_SHARING_H
+#define __OPENCL_CL_DX9_MEDIA_SHARING_H
+
+#include <CL/cl.h>
+#include <CL/cl_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+ * cl_khr_dx9_media_sharing */
+#define cl_khr_dx9_media_sharing 1
+
+typedef cl_uint cl_dx9_media_adapter_type_khr;
+typedef cl_uint cl_dx9_media_adapter_set_khr;
+
+#if defined(_WIN32)
+#include <d3d9.h>
+typedef struct _cl_dx9_surface_info_khr
+{
+ IDirect3DSurface9 *resource;
+ HANDLE shared_handle;
+} cl_dx9_surface_info_khr;
+#endif
+
+
+/******************************************************************************/
+
+// Error Codes
+#define CL_INVALID_DX9_MEDIA_ADAPTER_KHR -1010
+#define CL_INVALID_DX9_MEDIA_SURFACE_KHR -1011
+#define CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR -1012
+#define CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR -1013
+
+// cl_media_adapter_type_khr
+#define CL_ADAPTER_D3D9_KHR 0x2020
+#define CL_ADAPTER_D3D9EX_KHR 0x2021
+#define CL_ADAPTER_DXVA_KHR 0x2022
+
+// cl_media_adapter_set_khr
+#define CL_PREFERRED_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2023
+#define CL_ALL_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2024
+
+// cl_context_info
+#define CL_CONTEXT_ADAPTER_D3D9_KHR 0x2025
+#define CL_CONTEXT_ADAPTER_D3D9EX_KHR 0x2026
+#define CL_CONTEXT_ADAPTER_DXVA_KHR 0x2027
+
+// cl_mem_info
+#define CL_MEM_DX9_MEDIA_ADAPTER_TYPE_KHR 0x2028
+#define CL_MEM_DX9_MEDIA_SURFACE_INFO_KHR 0x2029
+
+// cl_image_info
+#define CL_IMAGE_DX9_MEDIA_PLANE_KHR 0x202A
+
+// cl_command_type
+#define CL_COMMAND_ACQUIRE_DX9_MEDIA_SURFACES_KHR 0x202B
+#define CL_COMMAND_RELEASE_DX9_MEDIA_SURFACES_KHR 0x202C
+
+/******************************************************************************/
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromDX9MediaAdapterKHR_fn)(
+ cl_platform_id platform,
+ cl_uint num_media_adapters,
+ cl_dx9_media_adapter_type_khr * media_adapter_type,
+ void * media_adapters,
+ cl_dx9_media_adapter_set_khr media_adapter_set,
+ cl_uint num_entries,
+ cl_device_id * devices,
+ cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromDX9MediaSurfaceKHR_fn)(
+ cl_context context,
+ cl_mem_flags flags,
+ cl_dx9_media_adapter_type_khr adapter_type,
+ void * surface_info,
+ cl_uint plane,
+ cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireDX9MediaSurfacesKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseDX9MediaSurfacesKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event) CL_API_SUFFIX__VERSION_1_2;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __OPENCL_CL_DX9_MEDIA_SHARING_H
+
diff --git a/include/CL/cl_egl.h b/include/CL/cl_egl.h
new file mode 100644
index 0000000000..c1bd4f3942
--- /dev/null
+++ b/include/CL/cl_egl.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2010 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+#ifndef __OPENCL_CL_EGL_H
+#define __OPENCL_CL_EGL_H
+
+#ifdef __APPLE__
+
+#else
+#include <CL/cl.h>
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Command type for events created with clEnqueueAcquireEGLObjectsKHR */
+#define CL_COMMAND_EGL_FENCE_SYNC_OBJECT_KHR 0x202F
+#define CL_COMMAND_ACQUIRE_EGL_OBJECTS_KHR 0x202D
+#define CL_COMMAND_RELEASE_EGL_OBJECTS_KHR 0x202E
+
+/* Error type for clCreateFromEGLImageKHR */
+#define CL_INVALID_EGL_OBJECT_KHR -1093
+#define CL_EGL_RESOURCE_NOT_ACQUIRED_KHR -1092
+
+/* CLeglImageKHR is an opaque handle to an EGLImage */
+typedef void* CLeglImageKHR;
+
+/* CLeglDisplayKHR is an opaque handle to an EGLDisplay */
+typedef void* CLeglDisplayKHR;
+
+/* properties passed to clCreateFromEGLImageKHR */
+typedef intptr_t cl_egl_image_properties_khr;
+
+
+#define cl_khr_egl_image 1
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromEGLImageKHR(cl_context /* context */,
+ CLeglDisplayKHR /* egldisplay */,
+ CLeglImageKHR /* eglimage */,
+ cl_mem_flags /* flags */,
+ const cl_egl_image_properties_khr * /* properties */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromEGLImageKHR_fn)(
+ cl_context context,
+ CLeglDisplayKHR egldisplay,
+ CLeglImageKHR eglimage,
+ cl_mem_flags flags,
+ const cl_egl_image_properties_khr * properties,
+ cl_int * errcode_ret);
+
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueAcquireEGLObjectsKHR(cl_command_queue /* command_queue */,
+ cl_uint /* num_objects */,
+ const cl_mem * /* mem_objects */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireEGLObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event);
+
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReleaseEGLObjectsKHR(cl_command_queue /* command_queue */,
+ cl_uint /* num_objects */,
+ const cl_mem * /* mem_objects */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseEGLObjectsKHR_fn)(
+ cl_command_queue command_queue,
+ cl_uint num_objects,
+ const cl_mem * mem_objects,
+ cl_uint num_events_in_wait_list,
+ const cl_event * event_wait_list,
+ cl_event * event);
+
+
+#define cl_khr_egl_event 1
+
+extern CL_API_ENTRY cl_event CL_API_CALL
+clCreateEventFromEGLSyncKHR(cl_context /* context */,
+ EGLSyncKHR /* sync */,
+ EGLDisplay /* display */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_event (CL_API_CALL *clCreateEventFromEGLSyncKHR_fn)(
+ cl_context context,
+ EGLSyncKHR sync,
+ EGLDisplay display,
+ cl_int * errcode_ret);
+
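+/* A minimal usage sketch for the EGL sharing functions above, assuming a
+ * valid context `ctx`, a queue `queue`, and an EGLImage created elsewhere
+ * (`egl_display` and `egl_image` are illustrative; error handling omitted):
+ *
+ *   cl_int err;
+ *   cl_mem mem = clCreateFromEGLImageKHR(ctx,
+ *                                        (CLeglDisplayKHR)egl_display,
+ *                                        (CLeglImageKHR)egl_image,
+ *                                        CL_MEM_READ_ONLY, NULL, &err);
+ *   clEnqueueAcquireEGLObjectsKHR(queue, 1, &mem, 0, NULL, NULL);
+ *   ... enqueue kernels that read mem ...
+ *   clEnqueueReleaseEGLObjectsKHR(queue, 1, &mem, 0, NULL, NULL);
+ */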
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_CL_EGL_H */
diff --git a/include/CL/cl_ext.h b/include/CL/cl_ext.h
new file mode 100644
index 0000000000..710bea8837
--- /dev/null
+++ b/include/CL/cl_ext.h
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2013 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
+
+/* cl_ext.h contains OpenCL extensions which don't have external */
+/* (OpenGL, D3D) dependencies. */
+
+#ifndef __CL_EXT_H
+#define __CL_EXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __APPLE__
+ #include <OpenCL/cl.h>
+ #include <AvailabilityMacros.h>
+#else
+ #include <CL/cl.h>
+#endif
+
+/* cl_khr_fp16 extension - no extension #define since it has no functions */
+#define CL_DEVICE_HALF_FP_CONFIG 0x1033
+
+/* Memory object destruction
+ *
+ * Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
+ *
+ * Registers a user callback function that will be called when the memory object is deleted and its resources
+ * freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
+ * stack associated with memobj. The registered user callback functions are called in the reverse order in
+ * which they were registered. The user callback functions are called and then the memory object is deleted
+ * and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
+ * notified when the memory referenced by host_ptr, specified when the memory object is created and used as
+ * the storage bits for the memory object, can be reused or freed.
+ *
+ * The application may not call CL APIs with the cl_mem object passed to pfn_notify.
+ *
+ * Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
+ * before using.
+ */
+#define cl_APPLE_SetMemObjectDestructor 1
+cl_int CL_API_ENTRY clSetMemObjectDestructorAPPLE( cl_mem /* memobj */,
+ void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
+ void * /*user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
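+
+/* A minimal sketch of the callback pattern described above; `host_ptr` is the
+ * externally allocated storage the cl_mem was created over with
+ * CL_MEM_USE_HOST_PTR (names are illustrative):
+ *
+ *   static void my_notify(cl_mem memobj, void *user_data)
+ *   {
+ *       free(user_data);            the host_ptr storage may now be reused
+ *   }
+ *   ...
+ *   clSetMemObjectDestructorAPPLE(memobj, my_notify, host_ptr);
+ */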
+
+
+/* Context Logging Functions
+ *
+ * The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
+ * Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
+ * before using.
+ *
+ * clLogMessagesToSystemLog forwards all log messages to the Apple System Logger
+ */
+#define cl_APPLE_ContextLoggingFunctions 1
+extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE( const char * /* errstr */,
+ const void * /* private_info */,
+ size_t /* cb */,
+ void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
+
+/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
+extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE( const char * /* errstr */,
+ const void * /* private_info */,
+ size_t /* cb */,
+ void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
+
+/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
+extern void CL_API_ENTRY clLogMessagesToStderrAPPLE( const char * /* errstr */,
+ const void * /* private_info */,
+ size_t /* cb */,
+ void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
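+
+/* These three functions match the pfn_notify signature of clCreateContext and
+ * can be passed to it directly; a minimal sketch (error handling omitted):
+ *
+ *   cl_int err;
+ *   cl_context ctx = clCreateContext(NULL, 1, &device,
+ *                                    clLogMessagesToStderrAPPLE, NULL, &err);
+ */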
+
+
+/************************
+* cl_khr_icd extension *
+************************/
+#define cl_khr_icd 1
+
+/* cl_platform_info */
+#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920
+
+/* Additional Error Codes */
+#define CL_PLATFORM_NOT_FOUND_KHR -1001
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clIcdGetPlatformIDsKHR(cl_uint /* num_entries */,
+ cl_platform_id * /* platforms */,
+ cl_uint * /* num_platforms */);
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
+ cl_uint /* num_entries */,
+ cl_platform_id * /* platforms */,
+ cl_uint * /* num_platforms */);
+
+
+/* Extension: cl_khr_image2D_buffer
+ *
+ * This extension allows a 2D image to be created from a cl_mem buffer without a copy.
+ * The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
+ * Both the sampler and sampler-less read_image built-in functions are supported for 2D images
+ * and 2D images created from a buffer. Similarly, the write_image built-ins are also supported
+ * for 2D images created from a buffer.
+ *
+ * When the 2D image from buffer is created, the client must specify the width,
+ * height, image format (i.e. channel order and channel data type) and, optionally, the row pitch.
+ *
+ * The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
+ * The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
+ */
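+
+/* In OpenCL 1.2 this is expressed through clCreateImage with a cl_image_desc
+ * whose buffer field names the source buffer. A sketch, assuming a context
+ * `ctx` and a buffer `buf` large enough for the image (error handling
+ * omitted; see the pitch and alignment requirements above):
+ *
+ *   cl_image_format fmt = { CL_RGBA, CL_UNORM_INT8 };
+ *   cl_image_desc desc = { 0 };
+ *   desc.image_type      = CL_MEM_OBJECT_IMAGE2D;
+ *   desc.image_width     = width;
+ *   desc.image_height    = height;
+ *   desc.image_row_pitch = row_pitch;
+ *   desc.buffer          = buf;          the image aliases buf, no copy made
+ *   cl_mem image = clCreateImage(ctx, CL_MEM_READ_WRITE, &fmt, &desc,
+ *                                NULL, &err);
+ */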
+
+/**************************************
+ * cl_khr_initialize_memory extension *
+ **************************************/
+
+#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x200E
+
+
+/**************************************
+ * cl_khr_terminate_context extension *
+ **************************************/
+
+#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x200F
+#define CL_CONTEXT_TERMINATE_KHR 0x2010
+
+#define cl_khr_terminate_context 1
+extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
+
+
+/*
+ * Extension: cl_khr_spir
+ *
+ * This extension adds support to create an OpenCL program object from a
+ * Standard Portable Intermediate Representation (SPIR) instance
+ */
+
+#define CL_DEVICE_SPIR_VERSIONS 0x40E0
+#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1
+
+
+/******************************************
+* cl_nv_device_attribute_query extension *
+******************************************/
+/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */
+#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
+#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
+#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002
+#define CL_DEVICE_WARP_SIZE_NV 0x4003
+#define CL_DEVICE_GPU_OVERLAP_NV 0x4004
+#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
+#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006
+
+/*********************************
+* cl_amd_device_attribute_query *
+*********************************/
+#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036
+
+/*********************************
+* cl_arm_printf extension
+*********************************/
+#define CL_PRINTF_CALLBACK_ARM 0x40B0
+#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1
+
+#ifdef CL_VERSION_1_1
+ /***********************************
+ * cl_ext_device_fission extension *
+ ***********************************/
+ #define cl_ext_device_fission 1
+
+ extern CL_API_ENTRY cl_int CL_API_CALL
+ clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ typedef CL_API_ENTRY cl_int
+ (CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ extern CL_API_ENTRY cl_int CL_API_CALL
+ clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ typedef CL_API_ENTRY cl_int
+ (CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ typedef cl_ulong cl_device_partition_property_ext;
+ extern CL_API_ENTRY cl_int CL_API_CALL
+ clCreateSubDevicesEXT( cl_device_id /*in_device*/,
+ const cl_device_partition_property_ext * /* properties */,
+ cl_uint /*num_entries*/,
+ cl_device_id * /*out_devices*/,
+ cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ typedef CL_API_ENTRY cl_int
+ ( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/,
+ const cl_device_partition_property_ext * /* properties */,
+ cl_uint /*num_entries*/,
+ cl_device_id * /*out_devices*/,
+ cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
+
+ /* cl_device_partition_property_ext */
+ #define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050
+ #define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051
+ #define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052
+ #define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053
+
+ /* clDeviceGetInfo selectors */
+ #define CL_DEVICE_PARENT_DEVICE_EXT 0x4054
+ #define CL_DEVICE_PARTITION_TYPES_EXT 0x4055
+ #define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056
+ #define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057
+ #define CL_DEVICE_PARTITION_STYLE_EXT 0x4058
+
+ /* error codes */
+ #define CL_DEVICE_PARTITION_FAILED_EXT -1057
+ #define CL_INVALID_PARTITION_COUNT_EXT -1058
+ #define CL_INVALID_PARTITION_NAME_EXT -1059
+
+ /* CL_AFFINITY_DOMAINs */
+ #define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1
+ #define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2
+ #define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3
+ #define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4
+ #define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10
+ #define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100
+
+ /* cl_device_partition_property_ext list terminators */
+ #define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0)
+ #define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0)
+ #define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1)
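+
+  /* A minimal partitioning sketch, assuming a device that reports
+   * cl_ext_device_fission: split off two sub-devices of two compute units
+   * each (error handling omitted):
+   *
+   *   cl_device_partition_property_ext props[] = {
+   *       CL_DEVICE_PARTITION_BY_COUNTS_EXT, 2, 2,
+   *       CL_PARTITION_BY_COUNTS_LIST_END_EXT,
+   *       CL_PROPERTIES_LIST_END_EXT
+   *   };
+   *   cl_device_id sub_devices[2];
+   *   cl_uint num_sub = 0;
+   *   clCreateSubDevicesEXT(device, props, 2, sub_devices, &num_sub);
+   */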
+
+/*********************************
+* cl_qcom_ext_host_ptr extension
+*********************************/
+
+#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29)
+
+#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0
+#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1
+#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2
+#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3
+#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4
+#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5
+#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6
+#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7
+
+typedef cl_uint cl_image_pitch_info_qcom;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetDeviceImageInfoQCOM(cl_device_id device,
+ size_t image_width,
+ size_t image_height,
+ const cl_image_format *image_format,
+ cl_image_pitch_info_qcom param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret);
+
+typedef struct _cl_mem_ext_host_ptr
+{
+ /* Type of external memory allocation. */
+ /* Legal values will be defined in layered extensions. */
+ cl_uint allocation_type;
+
+ /* Host cache policy for this external memory allocation. */
+ cl_uint host_cache_policy;
+
+} cl_mem_ext_host_ptr;
+
+/*********************************
+* cl_qcom_ion_host_ptr extension
+*********************************/
+
+#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8
+
+typedef struct _cl_mem_ion_host_ptr
+{
+ /* Type of external memory allocation. */
+ /* Must be CL_MEM_ION_HOST_PTR_QCOM for ION allocations. */
+ cl_mem_ext_host_ptr ext_host_ptr;
+
+ /* ION file descriptor */
+ int ion_filedesc;
+
+ /* Host pointer to the ION allocated memory */
+ void* ion_hostptr;
+
+} cl_mem_ion_host_ptr;
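+
+/* A minimal allocation sketch, assuming an ION buffer already allocated and
+ * mapped by the application; the flag combination follows Qualcomm's
+ * published examples and `ion_fd`/`ion_ptr` are illustrative (error handling
+ * omitted):
+ *
+ *   cl_mem_ion_host_ptr ion;
+ *   ion.ext_host_ptr.allocation_type   = CL_MEM_ION_HOST_PTR_QCOM;
+ *   ion.ext_host_ptr.host_cache_policy = CL_MEM_HOST_UNCACHED_QCOM;
+ *   ion.ion_filedesc = ion_fd;
+ *   ion.ion_hostptr  = ion_ptr;
+ *   cl_mem buf = clCreateBuffer(context,
+ *                               CL_MEM_USE_HOST_PTR | CL_MEM_EXT_HOST_PTR_QCOM,
+ *                               size, &ion, &err);
+ */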
+
+#endif /* CL_VERSION_1_1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* __CL_EXT_H */
diff --git a/include/CL/cl_gl.h b/include/CL/cl_gl.h
new file mode 100644
index 0000000000..af2036cc99
--- /dev/null
+++ b/include/CL/cl_gl.h
@@ -0,0 +1,162 @@
+/**********************************************************************************
+ * Copyright (c) 2008 - 2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+#ifndef __OPENCL_CL_GL_H
+#define __OPENCL_CL_GL_H
+
+#ifdef __APPLE__
+#include <OpenCL/cl.h>
+#else
+#include <CL/cl.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef cl_uint cl_gl_object_type;
+typedef cl_uint cl_gl_texture_info;
+typedef cl_uint cl_gl_platform_info;
+typedef struct __GLsync *cl_GLsync;
+
+/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
+#define CL_GL_OBJECT_BUFFER 0x2000
+#define CL_GL_OBJECT_TEXTURE2D 0x2001
+#define CL_GL_OBJECT_TEXTURE3D 0x2002
+#define CL_GL_OBJECT_RENDERBUFFER 0x2003
+#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
+#define CL_GL_OBJECT_TEXTURE1D 0x200F
+#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
+#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
+
+/* cl_gl_texture_info */
+#define CL_GL_TEXTURE_TARGET 0x2004
+#define CL_GL_MIPMAP_LEVEL 0x2005
+#define CL_GL_NUM_SAMPLES 0x2012
+
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLBuffer(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_GLuint /* bufobj */,
+ int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLTexture(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_GLenum /* target */,
+ cl_GLint /* miplevel */,
+ cl_GLuint /* texture */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLRenderbuffer(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_GLuint /* renderbuffer */,
+ cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLObjectInfo(cl_mem /* memobj */,
+ cl_gl_object_type * /* gl_object_type */,
+ cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLTextureInfo(cl_mem /* memobj */,
+ cl_gl_texture_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */,
+ cl_uint /* num_objects */,
+ const cl_mem * /* mem_objects */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */,
+ cl_uint /* num_objects */,
+ const cl_mem * /* mem_objects */,
+ cl_uint /* num_events_in_wait_list */,
+ const cl_event * /* event_wait_list */,
+ cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
+
+
+// Deprecated OpenCL 1.1 APIs
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
+clCreateFromGLTexture2D(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_GLenum /* target */,
+ cl_GLint /* miplevel */,
+ cl_GLuint /* texture */,
+ cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
+clCreateFromGLTexture3D(cl_context /* context */,
+ cl_mem_flags /* flags */,
+ cl_GLenum /* target */,
+ cl_GLint /* miplevel */,
+ cl_GLuint /* texture */,
+ cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
+
+/* cl_khr_gl_sharing extension */
+
+#define cl_khr_gl_sharing 1
+
+typedef cl_uint cl_gl_context_info;
+
+/* Additional Error Codes */
+#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
+
+/* cl_gl_context_info */
+#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
+#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
+
+/* Additional cl_context_properties */
+#define CL_GL_CONTEXT_KHR 0x2008
+#define CL_EGL_DISPLAY_KHR 0x2009
+#define CL_GLX_DISPLAY_KHR 0x200A
+#define CL_WGL_HDC_KHR 0x200B
+#define CL_CGL_SHAREGROUP_KHR 0x200C
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLContextInfoKHR(const cl_context_properties * /* properties */,
+ cl_gl_context_info /* param_name */,
+ size_t /* param_value_size */,
+ void * /* param_value */,
+ size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(
+ const cl_context_properties * properties,
+ cl_gl_context_info param_name,
+ size_t param_value_size,
+ void * param_value,
+ size_t * param_value_size_ret);
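+
+/* A minimal sharing-setup sketch (GLX case; `platform` and the current GL
+ * context are assumed to exist; error handling omitted):
+ *
+ *   cl_context_properties props[] = {
+ *       CL_GL_CONTEXT_KHR,   (cl_context_properties)glXGetCurrentContext(),
+ *       CL_GLX_DISPLAY_KHR,  (cl_context_properties)glXGetCurrentDisplay(),
+ *       CL_CONTEXT_PLATFORM, (cl_context_properties)platform,
+ *       0
+ *   };
+ *   cl_device_id device;
+ *   clGetGLContextInfoKHR(props, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR,
+ *                         sizeof(device), &device, NULL);
+ *   cl_context ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
+ */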
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_CL_GL_H */
diff --git a/include/CL/cl_gl_ext.h b/include/CL/cl_gl_ext.h
new file mode 100644
index 0000000000..77d53536f6
--- /dev/null
+++ b/include/CL/cl_gl_ext.h
@@ -0,0 +1,69 @@
+/**********************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */
+/* OpenGL dependencies. */
+
+#ifndef __OPENCL_CL_GL_EXT_H
+#define __OPENCL_CL_GL_EXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __APPLE__
+ #include <OpenCL/cl_gl.h>
+#else
+ #include <CL/cl_gl.h>
+#endif
+
+/*
+ * For each extension, follow this template
+ * cl_VEN_extname extension */
+/* #define cl_VEN_extname 1
+ * ... define new types, if any
+ * ... define new tokens, if any
+ * ... define new APIs, if any
+ *
+ * If you need GL types here, mirror them with a cl_GLtype, rather than including a GL header.
+ * This allows us to avoid having to decide whether to include GL headers or GLES here.
+ */
+
+/*
+ * cl_khr_gl_event extension
+ * See section 9.9 in the OpenCL 1.1 spec for more information
+ */
+#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
+
+extern CL_API_ENTRY cl_event CL_API_CALL
+clCreateEventFromGLsyncKHR(cl_context /* context */,
+ cl_GLsync /* cl_GLsync */,
+ cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_CL_GL_EXT_H */
diff --git a/include/CL/cl_platform.h b/include/CL/cl_platform.h
new file mode 100644
index 0000000000..cf2b7210ac
--- /dev/null
+++ b/include/CL/cl_platform.h
@@ -0,0 +1,1254 @@
+/**********************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11803 $ on $Date: 2010-06-25 10:02:12 -0700 (Fri, 25 Jun 2010) $ */
+
+#ifndef __CL_PLATFORM_H
+#define __CL_PLATFORM_H
+
+#ifdef __APPLE__
+ /* Contains #defines for AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER below */
+ #include <AvailabilityMacros.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_WIN32)
+ #define CL_API_ENTRY
+ #define CL_API_CALL __stdcall
+ #define CL_CALLBACK __stdcall
+#else
+ #define CL_API_ENTRY
+ #define CL_API_CALL
+ #define CL_CALLBACK
+#endif
+
+#ifdef __APPLE__
+ #define CL_EXTENSION_WEAK_LINK __attribute__((weak_import))
+ #define CL_API_SUFFIX__VERSION_1_0 AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_0 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER
+ #define CL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define GCL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_1 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_7
+
+ #ifdef AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
+ #define CL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
+ #define GCL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_8
+ #else
+ #warning This path should never happen outside of internal operating system development. AvailabilityMacros do not function correctly here!
+ #define CL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define GCL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
+ #endif
+#else
+ #define CL_EXTENSION_WEAK_LINK
+ #define CL_API_SUFFIX__VERSION_1_0
+ #define CL_EXT_SUFFIX__VERSION_1_0
+ #define CL_API_SUFFIX__VERSION_1_1
+ #define CL_EXT_SUFFIX__VERSION_1_1
+ #define CL_API_SUFFIX__VERSION_1_2
+ #define CL_EXT_SUFFIX__VERSION_1_2
+
+ #ifdef __GNUC__
+ #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
+ #else
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED __attribute__((deprecated))
+ #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
+ #endif
+
+ #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ #else
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED __attribute__((deprecated))
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ #endif
+ #elif _WIN32
+ #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
+ #else
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED __declspec(deprecated)
+ #endif
+
+ #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ #else
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED __declspec(deprecated)
+ #endif
+ #else
+ #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
+
+ #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+ #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+ #endif
+#endif
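+
+/* A consumer opts back into deprecated entry points by defining the matching
+ * macro before any OpenCL header is included, e.g.:
+ *
+ *   #define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+ *   #include <CL/cl.h>
+ */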
+
+#if (defined (_WIN32) && defined(_MSC_VER))
+
+/* scalar types */
+typedef signed __int8 cl_char;
+typedef unsigned __int8 cl_uchar;
+typedef signed __int16 cl_short;
+typedef unsigned __int16 cl_ushort;
+typedef signed __int32 cl_int;
+typedef unsigned __int32 cl_uint;
+typedef signed __int64 cl_long;
+typedef unsigned __int64 cl_ulong;
+
+typedef unsigned __int16 cl_half;
+typedef float cl_float;
+typedef double cl_double;
+
+/* Macro names and corresponding values defined by OpenCL */
+#define CL_CHAR_BIT 8
+#define CL_SCHAR_MAX 127
+#define CL_SCHAR_MIN (-127-1)
+#define CL_CHAR_MAX CL_SCHAR_MAX
+#define CL_CHAR_MIN CL_SCHAR_MIN
+#define CL_UCHAR_MAX 255
+#define CL_SHRT_MAX 32767
+#define CL_SHRT_MIN (-32767-1)
+#define CL_USHRT_MAX 65535
+#define CL_INT_MAX 2147483647
+#define CL_INT_MIN (-2147483647-1)
+#define CL_UINT_MAX 0xffffffffU
+#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
+#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
+#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
+
+#define CL_FLT_DIG 6
+#define CL_FLT_MANT_DIG 24
+#define CL_FLT_MAX_10_EXP +38
+#define CL_FLT_MAX_EXP +128
+#define CL_FLT_MIN_10_EXP -37
+#define CL_FLT_MIN_EXP -125
+#define CL_FLT_RADIX 2
+#define CL_FLT_MAX 340282346638528859811704183484516925440.0f
+#define CL_FLT_MIN 1.175494350822287507969e-38f
+#define CL_FLT_EPSILON 0x1.0p-23f
+
+#define CL_DBL_DIG 15
+#define CL_DBL_MANT_DIG 53
+#define CL_DBL_MAX_10_EXP +308
+#define CL_DBL_MAX_EXP +1024
+#define CL_DBL_MIN_10_EXP -307
+#define CL_DBL_MIN_EXP -1021
+#define CL_DBL_RADIX 2
+#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0
+#define CL_DBL_MIN 2.225073858507201383090e-308
+#define CL_DBL_EPSILON 2.220446049250313080847e-16
+
+#define CL_M_E 2.718281828459045090796
+#define CL_M_LOG2E 1.442695040888963387005
+#define CL_M_LOG10E 0.434294481903251816668
+#define CL_M_LN2 0.693147180559945286227
+#define CL_M_LN10 2.302585092994045901094
+#define CL_M_PI 3.141592653589793115998
+#define CL_M_PI_2 1.570796326794896557999
+#define CL_M_PI_4 0.785398163397448278999
+#define CL_M_1_PI 0.318309886183790691216
+#define CL_M_2_PI 0.636619772367581382433
+#define CL_M_2_SQRTPI 1.128379167095512558561
+#define CL_M_SQRT2 1.414213562373095145475
+#define CL_M_SQRT1_2 0.707106781186547572737
+
+#define CL_M_E_F 2.71828174591064f
+#define CL_M_LOG2E_F 1.44269502162933f
+#define CL_M_LOG10E_F 0.43429449200630f
+#define CL_M_LN2_F 0.69314718246460f
+#define CL_M_LN10_F 2.30258512496948f
+#define CL_M_PI_F 3.14159274101257f
+#define CL_M_PI_2_F 1.57079637050629f
+#define CL_M_PI_4_F 0.78539818525314f
+#define CL_M_1_PI_F 0.31830987334251f
+#define CL_M_2_PI_F 0.63661974668503f
+#define CL_M_2_SQRTPI_F 1.12837922573090f
+#define CL_M_SQRT2_F 1.41421353816986f
+#define CL_M_SQRT1_2_F 0.70710676908493f
+
+#define CL_NAN (CL_INFINITY - CL_INFINITY)
+#define CL_HUGE_VALF ((cl_float) 1e50)
+#define CL_HUGE_VAL ((cl_double) 1e500)
+#define CL_MAXFLOAT CL_FLT_MAX
+#define CL_INFINITY CL_HUGE_VALF
+
+#else
+
+#include <stdint.h>
+
+/* scalar types */
+typedef int8_t cl_char;
+typedef uint8_t cl_uchar;
+typedef int16_t cl_short __attribute__((aligned(2)));
+typedef uint16_t cl_ushort __attribute__((aligned(2)));
+typedef int32_t cl_int __attribute__((aligned(4)));
+typedef uint32_t cl_uint __attribute__((aligned(4)));
+typedef int64_t cl_long __attribute__((aligned(8)));
+typedef uint64_t cl_ulong __attribute__((aligned(8)));
+
+typedef uint16_t cl_half __attribute__((aligned(2)));
+typedef float cl_float __attribute__((aligned(4)));
+typedef double cl_double __attribute__((aligned(8)));
+
+/* Macro names and corresponding values defined by OpenCL */
+#define CL_CHAR_BIT 8
+#define CL_SCHAR_MAX 127
+#define CL_SCHAR_MIN (-127-1)
+#define CL_CHAR_MAX CL_SCHAR_MAX
+#define CL_CHAR_MIN CL_SCHAR_MIN
+#define CL_UCHAR_MAX 255
+#define CL_SHRT_MAX 32767
+#define CL_SHRT_MIN (-32767-1)
+#define CL_USHRT_MAX 65535
+#define CL_INT_MAX 2147483647
+#define CL_INT_MIN (-2147483647-1)
+#define CL_UINT_MAX 0xffffffffU
+#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
+#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
+#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
+
+#define CL_FLT_DIG 6
+#define CL_FLT_MANT_DIG 24
+#define CL_FLT_MAX_10_EXP +38
+#define CL_FLT_MAX_EXP +128
+#define CL_FLT_MIN_10_EXP -37
+#define CL_FLT_MIN_EXP -125
+#define CL_FLT_RADIX 2
+#define CL_FLT_MAX 0x1.fffffep127f
+#define CL_FLT_MIN 0x1.0p-126f
+#define CL_FLT_EPSILON 0x1.0p-23f
+
+#define CL_DBL_DIG 15
+#define CL_DBL_MANT_DIG 53
+#define CL_DBL_MAX_10_EXP +308
+#define CL_DBL_MAX_EXP +1024
+#define CL_DBL_MIN_10_EXP -307
+#define CL_DBL_MIN_EXP -1021
+#define CL_DBL_RADIX 2
+#define CL_DBL_MAX 0x1.fffffffffffffp1023
+#define CL_DBL_MIN 0x1.0p-1022
+#define CL_DBL_EPSILON 0x1.0p-52
+
+#define CL_M_E 2.718281828459045090796
+#define CL_M_LOG2E 1.442695040888963387005
+#define CL_M_LOG10E 0.434294481903251816668
+#define CL_M_LN2 0.693147180559945286227
+#define CL_M_LN10 2.302585092994045901094
+#define CL_M_PI 3.141592653589793115998
+#define CL_M_PI_2 1.570796326794896557999
+#define CL_M_PI_4 0.785398163397448278999
+#define CL_M_1_PI 0.318309886183790691216
+#define CL_M_2_PI 0.636619772367581382433
+#define CL_M_2_SQRTPI 1.128379167095512558561
+#define CL_M_SQRT2 1.414213562373095145475
+#define CL_M_SQRT1_2 0.707106781186547572737
+
+#define CL_M_E_F 2.71828174591064f
+#define CL_M_LOG2E_F 1.44269502162933f
+#define CL_M_LOG10E_F 0.43429449200630f
+#define CL_M_LN2_F 0.69314718246460f
+#define CL_M_LN10_F 2.30258512496948f
+#define CL_M_PI_F 3.14159274101257f
+#define CL_M_PI_2_F 1.57079637050629f
+#define CL_M_PI_4_F 0.78539818525314f
+#define CL_M_1_PI_F 0.31830987334251f
+#define CL_M_2_PI_F 0.63661974668503f
+#define CL_M_2_SQRTPI_F 1.12837922573090f
+#define CL_M_SQRT2_F 1.41421353816986f
+#define CL_M_SQRT1_2_F 0.70710676908493f
+
+#if defined( __GNUC__ )
+ #define CL_HUGE_VALF __builtin_huge_valf()
+ #define CL_HUGE_VAL __builtin_huge_val()
+ #define CL_NAN __builtin_nanf( "" )
+#else
+ #define CL_HUGE_VALF ((cl_float) 1e50)
+ #define CL_HUGE_VAL ((cl_double) 1e500)
+ float nanf( const char * );
+ #define CL_NAN nanf( "" )
+#endif
+#define CL_MAXFLOAT CL_FLT_MAX
+#define CL_INFINITY CL_HUGE_VALF
+
+#endif
+
+#include <stddef.h>
+
+/* Mirror types to GL types. Mirror types allow us to avoid deciding which headers to load based on whether we are using GL or GLES here. */
+typedef unsigned int cl_GLuint;
+typedef int cl_GLint;
+typedef unsigned int cl_GLenum;
+
+/*
+ * Vector types
+ *
+ * Note: OpenCL requires that all types be naturally aligned.
+ * This means that vector types must be naturally aligned.
+ * For example, a vector of four floats must be aligned to
+ * a 16 byte boundary (calculated as 4 * the natural 4-byte
+ * alignment of the float). The alignment qualifiers here
+ * will only function properly if your compiler supports them
+ * and if you don't actively work to defeat them. For example,
+ * in order for a cl_float4 to be 16 byte aligned in a struct,
+ * the start of the struct must itself be 16-byte aligned.
+ *
+ * Maintaining proper alignment is the user's responsibility.
+ */
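+
+/* For example, heap storage intended to back cl_float4 values must be
+ * 16-byte aligned; plain malloc does not guarantee that on every platform.
+ * A sketch, assuming C11 aligned_alloc is available:
+ *
+ *   cl_float4 *v = (cl_float4 *)aligned_alloc(16, n * sizeof(cl_float4));
+ *   v[0].s[0] = 1.0f;                  element access via the s[] member
+ */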
+
+/* Define basic vector types */
+#if defined( __VEC__ )
+ #include <altivec.h> /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */
+ typedef vector unsigned char __cl_uchar16;
+ typedef vector signed char __cl_char16;
+ typedef vector unsigned short __cl_ushort8;
+ typedef vector signed short __cl_short8;
+ typedef vector unsigned int __cl_uint4;
+ typedef vector signed int __cl_int4;
+ typedef vector float __cl_float4;
+ #define __CL_UCHAR16__ 1
+ #define __CL_CHAR16__ 1
+ #define __CL_USHORT8__ 1
+ #define __CL_SHORT8__ 1
+ #define __CL_UINT4__ 1
+ #define __CL_INT4__ 1
+ #define __CL_FLOAT4__ 1
+#endif
+
+#if defined( __SSE__ )
+ #if defined( __MINGW64__ )
+ #include <intrin.h>
+ #else
+ #include <xmmintrin.h>
+ #endif
+ #if defined( __GNUC__ )
+ typedef float __cl_float4 __attribute__((vector_size(16)));
+ #else
+ typedef __m128 __cl_float4;
+ #endif
+ #define __CL_FLOAT4__ 1
+#endif
+
+#if defined( __SSE2__ )
+ #if defined( __MINGW64__ )
+ #include <intrin.h>
+ #else
+ #include <emmintrin.h>
+ #endif
+ #if defined( __GNUC__ )
+ typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16)));
+ typedef cl_char __cl_char16 __attribute__((vector_size(16)));
+ typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16)));
+ typedef cl_short __cl_short8 __attribute__((vector_size(16)));
+ typedef cl_uint __cl_uint4 __attribute__((vector_size(16)));
+ typedef cl_int __cl_int4 __attribute__((vector_size(16)));
+ typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16)));
+ typedef cl_long __cl_long2 __attribute__((vector_size(16)));
+ typedef cl_double __cl_double2 __attribute__((vector_size(16)));
+ #else
+ typedef __m128i __cl_uchar16;
+ typedef __m128i __cl_char16;
+ typedef __m128i __cl_ushort8;
+ typedef __m128i __cl_short8;
+ typedef __m128i __cl_uint4;
+ typedef __m128i __cl_int4;
+ typedef __m128i __cl_ulong2;
+ typedef __m128i __cl_long2;
+ typedef __m128d __cl_double2;
+ #endif
+ #define __CL_UCHAR16__ 1
+ #define __CL_CHAR16__ 1
+ #define __CL_USHORT8__ 1
+ #define __CL_SHORT8__ 1
+ #define __CL_INT4__ 1
+ #define __CL_UINT4__ 1
+ #define __CL_ULONG2__ 1
+ #define __CL_LONG2__ 1
+ #define __CL_DOUBLE2__ 1
+#endif
+
+#if defined( __MMX__ )
+ #include <mmintrin.h>
+ #if defined( __GNUC__ )
+ typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8)));
+ typedef cl_char __cl_char8 __attribute__((vector_size(8)));
+ typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8)));
+ typedef cl_short __cl_short4 __attribute__((vector_size(8)));
+ typedef cl_uint __cl_uint2 __attribute__((vector_size(8)));
+ typedef cl_int __cl_int2 __attribute__((vector_size(8)));
+ typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8)));
+ typedef cl_long __cl_long1 __attribute__((vector_size(8)));
+ typedef cl_float __cl_float2 __attribute__((vector_size(8)));
+ #else
+ typedef __m64 __cl_uchar8;
+ typedef __m64 __cl_char8;
+ typedef __m64 __cl_ushort4;
+ typedef __m64 __cl_short4;
+ typedef __m64 __cl_uint2;
+ typedef __m64 __cl_int2;
+ typedef __m64 __cl_ulong1;
+ typedef __m64 __cl_long1;
+ typedef __m64 __cl_float2;
+ #endif
+ #define __CL_UCHAR8__ 1
+ #define __CL_CHAR8__ 1
+ #define __CL_USHORT4__ 1
+ #define __CL_SHORT4__ 1
+ #define __CL_INT2__ 1
+ #define __CL_UINT2__ 1
+ #define __CL_ULONG1__ 1
+ #define __CL_LONG1__ 1
+ #define __CL_FLOAT2__ 1
+#endif
+
+#if defined( __AVX__ )
+ #if defined( __MINGW64__ )
+ #include <intrin.h>
+ #else
+ #include <immintrin.h>
+ #endif
+ #if defined( __GNUC__ )
+ typedef cl_float __cl_float8 __attribute__((vector_size(32)));
+ typedef cl_double __cl_double4 __attribute__((vector_size(32)));
+ #else
+ typedef __m256 __cl_float8;
+ typedef __m256d __cl_double4;
+ #endif
+ #define __CL_FLOAT8__ 1
+ #define __CL_DOUBLE4__ 1
+#endif
+
+/* Define alignment keys */
+#if defined( __GNUC__ )
+ #define CL_ALIGNED(_x) __attribute__ ((aligned(_x)))
+#elif defined( _WIN32) && (_MSC_VER)
+ /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */
+ /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */
+ /* #include <crtdefs.h> */
+ /* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */
+ #define CL_ALIGNED(_x)
+#else
+ #warning Need to implement some method to align data here
+ #define CL_ALIGNED(_x)
+#endif
+
+/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ /* .xyzw and .s0123...{f|F} are supported */
+ #define CL_HAS_NAMED_VECTOR_FIELDS 1
+ /* .hi and .lo are supported */
+ #define CL_HAS_HI_LO_VECTOR_FIELDS 1
+#endif
+
+/* Define cl_vector types */
+
+/* ---- cl_charn ---- */
+typedef union
+{
+ cl_char CL_ALIGNED(2) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_char x, y; };
+ __extension__ struct{ cl_char s0, s1; };
+ __extension__ struct{ cl_char lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+ __cl_char2 v2;
+#endif
+}cl_char2;
+
+typedef union
+{
+ cl_char CL_ALIGNED(4) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_char x, y, z, w; };
+ __extension__ struct{ cl_char s0, s1, s2, s3; };
+ __extension__ struct{ cl_char2 lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+ __cl_char2 v2[2];
+#endif
+#if defined( __CL_CHAR4__)
+ __cl_char4 v4;
+#endif
+}cl_char4;
+
+/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */
+typedef cl_char4 cl_char3;
+
+typedef union
+{
+ cl_char CL_ALIGNED(8) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_char x, y, z, w; };
+ __extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_char4 lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+ __cl_char2 v2[4];
+#endif
+#if defined( __CL_CHAR4__)
+ __cl_char4 v4[2];
+#endif
+#if defined( __CL_CHAR8__ )
+ __cl_char8 v8;
+#endif
+}cl_char8;
+
+typedef union
+{
+ cl_char CL_ALIGNED(16) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_char8 lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+ __cl_char2 v2[8];
+#endif
+#if defined( __CL_CHAR4__)
+ __cl_char4 v4[4];
+#endif
+#if defined( __CL_CHAR8__ )
+ __cl_char8 v8[2];
+#endif
+#if defined( __CL_CHAR16__ )
+ __cl_char16 v16;
+#endif
+}cl_char16;
+
+
+/* ---- cl_ucharn ---- */
+typedef union
+{
+ cl_uchar CL_ALIGNED(2) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uchar x, y; };
+ __extension__ struct{ cl_uchar s0, s1; };
+ __extension__ struct{ cl_uchar lo, hi; };
+#endif
+#if defined( __cl_uchar2__)
+ __cl_uchar2 v2;
+#endif
+}cl_uchar2;
+
+typedef union
+{
+ cl_uchar CL_ALIGNED(4) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uchar x, y, z, w; };
+ __extension__ struct{ cl_uchar s0, s1, s2, s3; };
+ __extension__ struct{ cl_uchar2 lo, hi; };
+#endif
+#if defined( __CL_UCHAR2__)
+ __cl_uchar2 v2[2];
+#endif
+#if defined( __CL_UCHAR4__)
+ __cl_uchar4 v4;
+#endif
+}cl_uchar4;
+
+/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. */
+typedef cl_uchar4 cl_uchar3;
+
+typedef union
+{
+ cl_uchar CL_ALIGNED(8) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uchar x, y, z, w; };
+ __extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_uchar4 lo, hi; };
+#endif
+#if defined( __CL_UCHAR2__)
+ __cl_uchar2 v2[4];
+#endif
+#if defined( __CL_UCHAR4__)
+ __cl_uchar4 v4[2];
+#endif
+#if defined( __CL_UCHAR8__ )
+ __cl_uchar8 v8;
+#endif
+}cl_uchar8;
+
+typedef union
+{
+ cl_uchar CL_ALIGNED(16) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_uchar8 lo, hi; };
+#endif
+#if defined( __CL_UCHAR2__)
+ __cl_uchar2 v2[8];
+#endif
+#if defined( __CL_UCHAR4__)
+ __cl_uchar4 v4[4];
+#endif
+#if defined( __CL_UCHAR8__ )
+ __cl_uchar8 v8[2];
+#endif
+#if defined( __CL_UCHAR16__ )
+ __cl_uchar16 v16;
+#endif
+}cl_uchar16;
+
+
+/* ---- cl_shortn ---- */
+typedef union
+{
+ cl_short CL_ALIGNED(4) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_short x, y; };
+ __extension__ struct{ cl_short s0, s1; };
+ __extension__ struct{ cl_short lo, hi; };
+#endif
+#if defined( __CL_SHORT2__)
+ __cl_short2 v2;
+#endif
+}cl_short2;
+
+typedef union
+{
+ cl_short CL_ALIGNED(8) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_short x, y, z, w; };
+ __extension__ struct{ cl_short s0, s1, s2, s3; };
+ __extension__ struct{ cl_short2 lo, hi; };
+#endif
+#if defined( __CL_SHORT2__)
+ __cl_short2 v2[2];
+#endif
+#if defined( __CL_SHORT4__)
+ __cl_short4 v4;
+#endif
+}cl_short4;
+
+/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. */
+typedef cl_short4 cl_short3;
+
+typedef union
+{
+ cl_short CL_ALIGNED(16) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_short x, y, z, w; };
+ __extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_short4 lo, hi; };
+#endif
+#if defined( __CL_SHORT2__)
+ __cl_short2 v2[4];
+#endif
+#if defined( __CL_SHORT4__)
+ __cl_short4 v4[2];
+#endif
+#if defined( __CL_SHORT8__ )
+ __cl_short8 v8;
+#endif
+}cl_short8;
+
+typedef union
+{
+ cl_short CL_ALIGNED(32) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_short8 lo, hi; };
+#endif
+#if defined( __CL_SHORT2__)
+ __cl_short2 v2[8];
+#endif
+#if defined( __CL_SHORT4__)
+ __cl_short4 v4[4];
+#endif
+#if defined( __CL_SHORT8__ )
+ __cl_short8 v8[2];
+#endif
+#if defined( __CL_SHORT16__ )
+ __cl_short16 v16;
+#endif
+}cl_short16;
+
+
+/* ---- cl_ushortn ---- */
+typedef union
+{
+ cl_ushort CL_ALIGNED(4) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ushort x, y; };
+ __extension__ struct{ cl_ushort s0, s1; };
+ __extension__ struct{ cl_ushort lo, hi; };
+#endif
+#if defined( __CL_USHORT2__)
+ __cl_ushort2 v2;
+#endif
+}cl_ushort2;
+
+typedef union
+{
+ cl_ushort CL_ALIGNED(8) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ushort x, y, z, w; };
+ __extension__ struct{ cl_ushort s0, s1, s2, s3; };
+ __extension__ struct{ cl_ushort2 lo, hi; };
+#endif
+#if defined( __CL_USHORT2__)
+ __cl_ushort2 v2[2];
+#endif
+#if defined( __CL_USHORT4__)
+ __cl_ushort4 v4;
+#endif
+}cl_ushort4;
+
+/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. */
+typedef cl_ushort4 cl_ushort3;
+
+typedef union
+{
+ cl_ushort CL_ALIGNED(16) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ushort x, y, z, w; };
+ __extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_ushort4 lo, hi; };
+#endif
+#if defined( __CL_USHORT2__)
+ __cl_ushort2 v2[4];
+#endif
+#if defined( __CL_USHORT4__)
+ __cl_ushort4 v4[2];
+#endif
+#if defined( __CL_USHORT8__ )
+ __cl_ushort8 v8;
+#endif
+}cl_ushort8;
+
+typedef union
+{
+ cl_ushort CL_ALIGNED(32) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_ushort8 lo, hi; };
+#endif
+#if defined( __CL_USHORT2__)
+ __cl_ushort2 v2[8];
+#endif
+#if defined( __CL_USHORT4__)
+ __cl_ushort4 v4[4];
+#endif
+#if defined( __CL_USHORT8__ )
+ __cl_ushort8 v8[2];
+#endif
+#if defined( __CL_USHORT16__ )
+ __cl_ushort16 v16;
+#endif
+}cl_ushort16;
+
+/* ---- cl_intn ---- */
+typedef union
+{
+ cl_int CL_ALIGNED(8) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_int x, y; };
+ __extension__ struct{ cl_int s0, s1; };
+ __extension__ struct{ cl_int lo, hi; };
+#endif
+#if defined( __CL_INT2__)
+ __cl_int2 v2;
+#endif
+}cl_int2;
+
+typedef union
+{
+ cl_int CL_ALIGNED(16) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_int x, y, z, w; };
+ __extension__ struct{ cl_int s0, s1, s2, s3; };
+ __extension__ struct{ cl_int2 lo, hi; };
+#endif
+#if defined( __CL_INT2__)
+ __cl_int2 v2[2];
+#endif
+#if defined( __CL_INT4__)
+ __cl_int4 v4;
+#endif
+}cl_int4;
+
+/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. */
+typedef cl_int4 cl_int3;
+
+typedef union
+{
+ cl_int CL_ALIGNED(32) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_int x, y, z, w; };
+ __extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_int4 lo, hi; };
+#endif
+#if defined( __CL_INT2__)
+ __cl_int2 v2[4];
+#endif
+#if defined( __CL_INT4__)
+ __cl_int4 v4[2];
+#endif
+#if defined( __CL_INT8__ )
+ __cl_int8 v8;
+#endif
+}cl_int8;
+
+typedef union
+{
+ cl_int CL_ALIGNED(64) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_int8 lo, hi; };
+#endif
+#if defined( __CL_INT2__)
+ __cl_int2 v2[8];
+#endif
+#if defined( __CL_INT4__)
+ __cl_int4 v4[4];
+#endif
+#if defined( __CL_INT8__ )
+ __cl_int8 v8[2];
+#endif
+#if defined( __CL_INT16__ )
+ __cl_int16 v16;
+#endif
+}cl_int16;
+
+
+/* ---- cl_uintn ---- */
+typedef union
+{
+ cl_uint CL_ALIGNED(8) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uint x, y; };
+ __extension__ struct{ cl_uint s0, s1; };
+ __extension__ struct{ cl_uint lo, hi; };
+#endif
+#if defined( __CL_UINT2__)
+ __cl_uint2 v2;
+#endif
+}cl_uint2;
+
+typedef union
+{
+ cl_uint CL_ALIGNED(16) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uint x, y, z, w; };
+ __extension__ struct{ cl_uint s0, s1, s2, s3; };
+ __extension__ struct{ cl_uint2 lo, hi; };
+#endif
+#if defined( __CL_UINT2__)
+ __cl_uint2 v2[2];
+#endif
+#if defined( __CL_UINT4__)
+ __cl_uint4 v4;
+#endif
+}cl_uint4;
+
+/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. */
+typedef cl_uint4 cl_uint3;
+
+typedef union
+{
+ cl_uint CL_ALIGNED(32) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uint x, y, z, w; };
+ __extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_uint4 lo, hi; };
+#endif
+#if defined( __CL_UINT2__)
+ __cl_uint2 v2[4];
+#endif
+#if defined( __CL_UINT4__)
+ __cl_uint4 v4[2];
+#endif
+#if defined( __CL_UINT8__ )
+ __cl_uint8 v8;
+#endif
+}cl_uint8;
+
+typedef union
+{
+ cl_uint CL_ALIGNED(64) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_uint8 lo, hi; };
+#endif
+#if defined( __CL_UINT2__)
+ __cl_uint2 v2[8];
+#endif
+#if defined( __CL_UINT4__)
+ __cl_uint4 v4[4];
+#endif
+#if defined( __CL_UINT8__ )
+ __cl_uint8 v8[2];
+#endif
+#if defined( __CL_UINT16__ )
+ __cl_uint16 v16;
+#endif
+}cl_uint16;
+
+/* ---- cl_longn ---- */
+typedef union
+{
+ cl_long CL_ALIGNED(16) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_long x, y; };
+ __extension__ struct{ cl_long s0, s1; };
+ __extension__ struct{ cl_long lo, hi; };
+#endif
+#if defined( __CL_LONG2__)
+ __cl_long2 v2;
+#endif
+}cl_long2;
+
+typedef union
+{
+ cl_long CL_ALIGNED(32) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_long x, y, z, w; };
+ __extension__ struct{ cl_long s0, s1, s2, s3; };
+ __extension__ struct{ cl_long2 lo, hi; };
+#endif
+#if defined( __CL_LONG2__)
+ __cl_long2 v2[2];
+#endif
+#if defined( __CL_LONG4__)
+ __cl_long4 v4;
+#endif
+}cl_long4;
+
+/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. */
+typedef cl_long4 cl_long3;
+
+typedef union
+{
+ cl_long CL_ALIGNED(64) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_long x, y, z, w; };
+ __extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_long4 lo, hi; };
+#endif
+#if defined( __CL_LONG2__)
+ __cl_long2 v2[4];
+#endif
+#if defined( __CL_LONG4__)
+ __cl_long4 v4[2];
+#endif
+#if defined( __CL_LONG8__ )
+ __cl_long8 v8;
+#endif
+}cl_long8;
+
+typedef union
+{
+ cl_long CL_ALIGNED(128) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_long8 lo, hi; };
+#endif
+#if defined( __CL_LONG2__)
+ __cl_long2 v2[8];
+#endif
+#if defined( __CL_LONG4__)
+ __cl_long4 v4[4];
+#endif
+#if defined( __CL_LONG8__ )
+ __cl_long8 v8[2];
+#endif
+#if defined( __CL_LONG16__ )
+ __cl_long16 v16;
+#endif
+}cl_long16;
+
+
+/* ---- cl_ulongn ---- */
+typedef union
+{
+ cl_ulong CL_ALIGNED(16) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ulong x, y; };
+ __extension__ struct{ cl_ulong s0, s1; };
+ __extension__ struct{ cl_ulong lo, hi; };
+#endif
+#if defined( __CL_ULONG2__)
+ __cl_ulong2 v2;
+#endif
+}cl_ulong2;
+
+typedef union
+{
+ cl_ulong CL_ALIGNED(32) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ulong x, y, z, w; };
+ __extension__ struct{ cl_ulong s0, s1, s2, s3; };
+ __extension__ struct{ cl_ulong2 lo, hi; };
+#endif
+#if defined( __CL_ULONG2__)
+ __cl_ulong2 v2[2];
+#endif
+#if defined( __CL_ULONG4__)
+ __cl_ulong4 v4;
+#endif
+}cl_ulong4;
+
+/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. */
+typedef cl_ulong4 cl_ulong3;
+
+typedef union
+{
+ cl_ulong CL_ALIGNED(64) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ulong x, y, z, w; };
+ __extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_ulong4 lo, hi; };
+#endif
+#if defined( __CL_ULONG2__)
+ __cl_ulong2 v2[4];
+#endif
+#if defined( __CL_ULONG4__)
+ __cl_ulong4 v4[2];
+#endif
+#if defined( __CL_ULONG8__ )
+ __cl_ulong8 v8;
+#endif
+}cl_ulong8;
+
+typedef union
+{
+ cl_ulong CL_ALIGNED(128) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_ulong8 lo, hi; };
+#endif
+#if defined( __CL_ULONG2__)
+ __cl_ulong2 v2[8];
+#endif
+#if defined( __CL_ULONG4__)
+ __cl_ulong4 v4[4];
+#endif
+#if defined( __CL_ULONG8__ )
+ __cl_ulong8 v8[2];
+#endif
+#if defined( __CL_ULONG16__ )
+ __cl_ulong16 v16;
+#endif
+}cl_ulong16;
+
+
+/* ---- cl_floatn ---- */
+
+typedef union
+{
+ cl_float CL_ALIGNED(8) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_float x, y; };
+ __extension__ struct{ cl_float s0, s1; };
+ __extension__ struct{ cl_float lo, hi; };
+#endif
+#if defined( __CL_FLOAT2__)
+ __cl_float2 v2;
+#endif
+}cl_float2;
+
+typedef union
+{
+ cl_float CL_ALIGNED(16) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_float x, y, z, w; };
+ __extension__ struct{ cl_float s0, s1, s2, s3; };
+ __extension__ struct{ cl_float2 lo, hi; };
+#endif
+#if defined( __CL_FLOAT2__)
+ __cl_float2 v2[2];
+#endif
+#if defined( __CL_FLOAT4__)
+ __cl_float4 v4;
+#endif
+}cl_float4;
+
+/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. */
+typedef cl_float4 cl_float3;
+
+typedef union
+{
+ cl_float CL_ALIGNED(32) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_float x, y, z, w; };
+ __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_float4 lo, hi; };
+#endif
+#if defined( __CL_FLOAT2__)
+ __cl_float2 v2[4];
+#endif
+#if defined( __CL_FLOAT4__)
+ __cl_float4 v4[2];
+#endif
+#if defined( __CL_FLOAT8__ )
+ __cl_float8 v8;
+#endif
+}cl_float8;
+
+typedef union
+{
+ cl_float CL_ALIGNED(64) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_float8 lo, hi; };
+#endif
+#if defined( __CL_FLOAT2__)
+ __cl_float2 v2[8];
+#endif
+#if defined( __CL_FLOAT4__)
+ __cl_float4 v4[4];
+#endif
+#if defined( __CL_FLOAT8__ )
+ __cl_float8 v8[2];
+#endif
+#if defined( __CL_FLOAT16__ )
+ __cl_float16 v16;
+#endif
+}cl_float16;
+
+/* ---- cl_doublen ---- */
+
+typedef union
+{
+ cl_double CL_ALIGNED(16) s[2];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_double x, y; };
+ __extension__ struct{ cl_double s0, s1; };
+ __extension__ struct{ cl_double lo, hi; };
+#endif
+#if defined( __CL_DOUBLE2__)
+ __cl_double2 v2;
+#endif
+}cl_double2;
+
+typedef union
+{
+ cl_double CL_ALIGNED(32) s[4];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_double x, y, z, w; };
+ __extension__ struct{ cl_double s0, s1, s2, s3; };
+ __extension__ struct{ cl_double2 lo, hi; };
+#endif
+#if defined( __CL_DOUBLE2__)
+ __cl_double2 v2[2];
+#endif
+#if defined( __CL_DOUBLE4__)
+ __cl_double4 v4;
+#endif
+}cl_double4;
+
+/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */
+typedef cl_double4 cl_double3;
+
+typedef union
+{
+ cl_double CL_ALIGNED(64) s[8];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_double x, y, z, w; };
+ __extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; };
+ __extension__ struct{ cl_double4 lo, hi; };
+#endif
+#if defined( __CL_DOUBLE2__)
+ __cl_double2 v2[4];
+#endif
+#if defined( __CL_DOUBLE4__)
+ __cl_double4 v4[2];
+#endif
+#if defined( __CL_DOUBLE8__ )
+ __cl_double8 v8;
+#endif
+}cl_double8;
+
+typedef union
+{
+ cl_double CL_ALIGNED(128) s[16];
+#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
+ __extension__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
+ __extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
+ __extension__ struct{ cl_double8 lo, hi; };
+#endif
+#if defined( __CL_DOUBLE2__)
+ __cl_double2 v2[8];
+#endif
+#if defined( __CL_DOUBLE4__)
+ __cl_double4 v4[4];
+#endif
+#if defined( __CL_DOUBLE8__ )
+ __cl_double8 v8[2];
+#endif
+#if defined( __CL_DOUBLE16__ )
+ __cl_double16 v16;
+#endif
+}cl_double16;
+
+/* Macro to facilitate debugging
+ * Usage:
+ * Place CL_PROGRAM_STRING_DEBUG_INFO immediately before the opening quote of your source string.
+ * The first line then ends with: CL_PROGRAM_STRING_DEBUG_INFO "\
+ * Each line thereafter of OpenCL C source must end with: \n\
+ * The last line ends in: ";
+ *
+ * Example:
+ *
+ * const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\
+ * kernel void foo( int a, float * b ) \n\
+ * { \n\
+ * // my comment \n\
+ * *b[ get_global_id(0)] = a; \n\
+ * } \n\
+ * ";
+ *
+ * This should correctly set up the line (and column) and file information for
+ * your source string so you can do source-level debugging.
+ */
+#define __CL_STRINGIFY( _x ) # _x
+#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x )
+#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n"
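+
+/* Illustrative sketch (assumes a valid cl_context ctx; error handling
+ * omitted): a source string built this way can be passed straight to
+ * clCreateProgramWithSource, letting a debugger map kernel lines back to
+ * the host file that embeds them:
+ *
+ *   const char *src = CL_PROGRAM_STRING_DEBUG_INFO "\
+ *   kernel void scale( global float * b, float a ) \n\
+ *   { b[ get_global_id(0) ] *= a; } \n\
+ *   ";
+ *   cl_int err;
+ *   cl_program prog = clCreateProgramWithSource( ctx, 1, &src, NULL, &err );
+ */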
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __CL_PLATFORM_H */
diff --git a/include/CL/opencl.h b/include/CL/opencl.h
new file mode 100644
index 0000000000..3f00524719
--- /dev/null
+++ b/include/CL/opencl.h
@@ -0,0 +1,54 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2012 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+#ifndef __OPENCL_H
+#define __OPENCL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __APPLE__
+
+#include <OpenCL/cl.h>
+#include <OpenCL/cl_gl.h>
+#include <OpenCL/cl_gl_ext.h>
+#include <OpenCL/cl_ext.h>
+
+#else
+
+#include <CL/cl.h>
+#include <CL/cl_gl.h>
+#include <CL/cl_gl_ext.h>
+#include <CL/cl_ext.h>
+
+#endif
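+
+/* Illustrative sketch (not part of the upstream header): host code normally
+ * includes only this umbrella header and links against the OpenCL library:
+ *
+ *   #include <CL/opencl.h>
+ *
+ *   cl_uint num_platforms = 0;
+ *   clGetPlatformIDs( 0, NULL, &num_platforms );  // count available platforms
+ */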
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_H */
+