Reduction on SYCL-supported Devices¶
numba-dppy
does not yet provide any specific decorator to implement reduction kernels. However, a kernel reduction
can be written explicitly. This section provides two approaches for writing a kernel reduction using
numba_dppy.kernel
.
Examples¶
Example 1¶
This example demonstrates a summation reduction on a one-dimensional array.
Full example can be found at numba_dppy/examples/sum_reduction.py
.
In this example, to reduce the array we invoke the kernel multiple times.
@dppy.kernel
def sum_reduction_kernel(A, R, stride):
    """Pairwise-sum kernel: each work item adds two elements of A.

    Work item ``idx`` adds ``A[idx]`` and ``A[idx + stride]``, writes the
    partial sum to ``R[idx]``, and copies it back into ``A[idx]`` so the
    next kernel invocation can reduce over a halved range.
    """
    idx = dppy.get_global_id(0)
    # Combine this work item's element with its partner `stride` slots away.
    R[idx] = A[idx] + A[idx + stride]
    # Store the partial sum so the next pass reads it from A.
    A[idx] = R[idx]
def sum_reduce(A):
    """Size of A should be power of two."""
    remaining = len(A)
    # Half of A is enough scratch space to hold the pairwise sums.
    R = np.array(np.random.random(math.ceil(remaining / 2)), dtype=A.dtype)

    with dpctl.device_context(get_context()):
        # Each pass halves the number of live elements until one remains.
        while remaining > 1:
            half = remaining // 2
            sum_reduction_kernel[half, dppy.DEFAULT_LOCAL_SIZE](A, R, half)
            remaining = half

    return R[0]
Example 2¶
Full example can be found at
numba_dppy/examples/sum_reduction_recursive_ocl.py
.
@dppy.kernel
def sum_reduction_kernel(A, input_size, partial_sums):
    """Each work group cooperatively reduces its slice of A to one value,
    stored at partial_sums[group_id].

    NOTE(review): the local scratch array is fixed at 64 entries, so this
    assumes the launch's work-group size is <= 64 — confirm against callers.
    """
    local_id = dppy.get_local_id(0)
    global_id = dppy.get_global_id(0)
    group_size = dppy.get_local_size(0)
    group_id = dppy.get_group_id(0)

    # Work-group-local scratch: one slot per work item.
    local_sums = dppy.local.array(64, int32)

    # Zero-fill first so work items past input_size contribute nothing.
    local_sums[local_id] = 0
    if global_id < input_size:
        local_sums[local_id] = A[global_id]

    # Loop for computing local_sums : divide workgroup into 2 parts.
    # Each iteration halves the active range of local_sums.
    stride = group_size // 2
    while stride > 0:
        # Waiting for each 2x2 addition into given workgroup
        # (barrier so every work item sees the previous iteration's writes).
        dppy.barrier(dppy.CLK_LOCAL_MEM_FENCE)
        # Add elements 2 by 2 between local_id and local_id + stride
        if local_id < stride:
            local_sums[local_id] += local_sums[local_id + stride]
        stride >>= 1

    # Work item 0 publishes the group's partial sum to global memory.
    if local_id == 0:
        partial_sums[group_id] = local_sums[0]
def sum_recursive_reduction(size, group_size, Dinp, Dpartial_sums):
    """Recursively reduce Dinp (length `size`) to a single scalar.

    One kernel launch produces one partial sum per work group into
    Dpartial_sums; if those partials still span more than one work group,
    recurse with the roles of the two buffers swapped.
    """
    passed_size = size
    if size <= group_size:
        nb_work_groups = 1
    else:
        nb_work_groups = size // group_size
        if size % group_size != 0:
            # Round the launch size up to a whole number of work groups.
            nb_work_groups += 1
            passed_size = nb_work_groups * group_size

    sum_reduction_kernel[passed_size, group_size](Dinp, size, Dpartial_sums)

    if nb_work_groups <= group_size:
        # One more launch folds all partials into Dinp[0].
        sum_reduction_kernel[group_size, group_size](
            Dpartial_sums, nb_work_groups, Dinp
        )
        return Dinp[0]

    # Still too many partials for a single work group: recurse, swapping
    # the input and partial-sum buffers.
    return sum_recursive_reduction(nb_work_groups, group_size, Dpartial_sums, Dinp)
def sum_reduce(A):
    """Sum the elements of A on the device.

    Stages A and a partial-sum buffer in USM-shared memory, then delegates
    to sum_recursive_reduction inside a device context.
    """
    n_items = len(A)
    wg_size = 64
    # One partial sum per work group (ceiling division).
    n_groups = n_items // wg_size
    if (n_items % wg_size) != 0:
        n_groups += 1

    host_partials = np.zeros(n_groups).astype(A.dtype)

    with dpctl.device_context(get_context()):
        # Copy the input into USM-shared memory so the device can access it.
        inp_mem = dpctl_mem.MemoryUSMShared(A.size * A.dtype.itemsize)
        dev_inp = np.ndarray(A.shape, buffer=inp_mem, dtype=A.dtype)
        np.copyto(dev_inp, A)

        # Same staging for the per-work-group partial sums.
        partials_mem = dpctl_mem.MemoryUSMShared(
            host_partials.size * host_partials.dtype.itemsize
        )
        dev_partials = np.ndarray(
            host_partials.shape, buffer=partials_mem, dtype=host_partials.dtype
        )
        np.copyto(dev_partials, host_partials)

        result = sum_recursive_reduction(n_items, wg_size, dev_inp, dev_partials)

    return result
Note
numba-dppy
does not yet provide any analogue to the numba.cuda.reduce
decorator for writing reduction kernels.
Such a decorator will be added in a future release.
Full examples¶
numba_dppy/examples/sum_reduction_recursive_ocl.py
numba_dppy/examples/sum_reduction_ocl.py
numba_dppy/examples/sum_reduction.py