|
| 1 | +// RUN: mlir-vulkan-runner %s \ |
| 2 | +// RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \ |
| 3 | +// RUN: --entry-point-result=void | FileCheck %s |
| 4 | + |
| 5 | +// This kernel computes the argmax (index of the maximum element) from an array |
| 6 | +// of integers. Each thread computes a lane maximum using a single `scf.for`. |
| 7 | +// Then `gpu.subgroup_reduce` is used to find the maximum across the entire |
| 8 | +// subgroup, which is then used by SPIR-V subgroup ops to compute the argmax |
// of the entire input array. Note that this kernel only computes a correct
// result when launched as a single workgroup whose threads all belong to one
// subgroup (i.e. the subgroup size equals the workgroup size of 32).
| 11 | + |
| 12 | +// CHECK: [15] |
| 13 | +module attributes { |
| 14 | + gpu.container_module, |
| 15 | + spirv.target_env = #spirv.target_env< |
| 16 | + #spirv.vce<v1.3, [Shader, Groups, GroupNonUniformArithmetic, GroupNonUniformBallot], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>> |
| 17 | +} { |
| 18 | + gpu.module @kernels { |
| 19 | + gpu.func @kernel_argmax(%input : memref<128xi32>, %output : memref<1xi32>, %total_count_buf : memref<1xi32>) kernel |
| 20 | + attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [32, 1, 1]>} { |
| 21 | + %idx0 = arith.constant 0 : index |
| 22 | + %idx1 = arith.constant 1 : index |
| 23 | + |
| 24 | + %total_count = memref.load %total_count_buf[%idx0] : memref<1xi32> |
| 25 | + %lane_count_idx = gpu.subgroup_size : index |
| 26 | + %lane_count_i32 = index.castu %lane_count_idx : index to i32 |
| 27 | + %lane_id_idx = gpu.thread_id x |
| 28 | + %lane_id_i32 = index.castu %lane_id_idx : index to i32 |
| 29 | + %lane_res_init = arith.constant 0 : i32 |
| 30 | + %lane_max_init = memref.load %input[%lane_id_idx] : memref<128xi32> |
| 31 | + %num_batches_i32 = arith.divui %total_count, %lane_count_i32 : i32 |
| 32 | + %num_batches_idx = index.castu %num_batches_i32 : i32 to index |
| 33 | + |
| 34 | + %lane_res, %lane_max = scf.for %iter = %idx1 to %num_batches_idx step %idx1 |
| 35 | + iter_args(%lane_res_iter = %lane_res_init, %lane_max_iter = %lane_max_init) -> (i32, i32) { |
| 36 | + %iter_i32 = index.castu %iter : index to i32 |
| 37 | + %mul = arith.muli %lane_count_i32, %iter_i32 : i32 |
| 38 | + %idx_i32 = arith.addi %mul, %lane_id_i32 : i32 |
| 39 | + %idx = index.castu %idx_i32 : i32 to index |
| 40 | + %elem = memref.load %input[%idx] : memref<128xi32> |
| 41 | + %gt = arith.cmpi sgt, %elem, %lane_max_iter : i32 |
| 42 | + %lane_res_next = arith.select %gt, %idx_i32, %lane_res_iter : i32 |
| 43 | + %lane_max_next = arith.select %gt, %elem, %lane_max_iter : i32 |
| 44 | + scf.yield %lane_res_next, %lane_max_next : i32, i32 |
| 45 | + } |
| 46 | + |
| 47 | + %subgroup_max = gpu.subgroup_reduce maxsi %lane_max : (i32) -> (i32) |
| 48 | + %eq = arith.cmpi eq, %lane_max, %subgroup_max : i32 |
| 49 | + %ballot = spirv.GroupNonUniformBallot <Subgroup> %eq : vector<4xi32> |
| 50 | + %lsb = spirv.GroupNonUniformBallotFindLSB <Subgroup> %ballot : vector<4xi32>, i32 |
| 51 | + %cond = arith.cmpi eq, %lsb, %lane_id_i32 : i32 |
| 52 | + |
| 53 | + scf.if %cond { |
| 54 | + memref.store %lane_res, %output[%idx0] : memref<1xi32> |
| 55 | + } |
| 56 | + |
| 57 | + gpu.return |
| 58 | + } |
| 59 | + } |
| 60 | + |
| 61 | + func.func @main() { |
| 62 | + // Allocate 3 buffers. |
| 63 | + %in_buf = memref.alloc() : memref<128xi32> |
| 64 | + %out_buf = memref.alloc() : memref<1xi32> |
| 65 | + %total_count_buf = memref.alloc() : memref<1xi32> |
| 66 | + |
| 67 | + // Constants. |
| 68 | + %cst0 = arith.constant 0 : i32 |
| 69 | + %idx0 = arith.constant 0 : index |
| 70 | + %idx1 = arith.constant 1 : index |
| 71 | + %idx16 = arith.constant 16 : index |
| 72 | + %idx32 = arith.constant 32 : index |
| 73 | + %idx48 = arith.constant 48 : index |
| 74 | + %idx64 = arith.constant 64 : index |
| 75 | + %idx80 = arith.constant 80 : index |
| 76 | + %idx96 = arith.constant 96 : index |
| 77 | + %idx112 = arith.constant 112 : index |
| 78 | + |
| 79 | + // Initialize input buffer. |
| 80 | + %in_vec = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<16xi32> |
| 81 | + vector.store %in_vec, %in_buf[%idx0] : memref<128xi32>, vector<16xi32> |
| 82 | + vector.store %in_vec, %in_buf[%idx16] : memref<128xi32>, vector<16xi32> |
| 83 | + vector.store %in_vec, %in_buf[%idx32] : memref<128xi32>, vector<16xi32> |
| 84 | + vector.store %in_vec, %in_buf[%idx48] : memref<128xi32>, vector<16xi32> |
| 85 | + vector.store %in_vec, %in_buf[%idx64] : memref<128xi32>, vector<16xi32> |
| 86 | + vector.store %in_vec, %in_buf[%idx80] : memref<128xi32>, vector<16xi32> |
| 87 | + vector.store %in_vec, %in_buf[%idx96] : memref<128xi32>, vector<16xi32> |
| 88 | + vector.store %in_vec, %in_buf[%idx112] : memref<128xi32>, vector<16xi32> |
| 89 | + |
| 90 | + // Initialize output buffer. |
| 91 | + %out_buf2 = memref.cast %out_buf : memref<1xi32> to memref<?xi32> |
| 92 | + call @fillResource1DInt(%out_buf2, %cst0) : (memref<?xi32>, i32) -> () |
| 93 | + |
| 94 | + // Total number of scalars. |
| 95 | + %total_count = arith.constant 128 : i32 |
| 96 | + %total_count_buf2 = memref.cast %total_count_buf : memref<1xi32> to memref<?xi32> |
| 97 | + call @fillResource1DInt(%total_count_buf2, %total_count) : (memref<?xi32>, i32) -> () |
| 98 | + |
| 99 | + // Launch kernel function and print output. |
| 100 | + gpu.launch_func @kernels::@kernel_argmax |
| 101 | + blocks in (%idx1, %idx1, %idx1) threads in (%idx32, %idx1, %idx1) |
| 102 | + args(%in_buf : memref<128xi32>, %out_buf : memref<1xi32>, %total_count_buf : memref<1xi32>) |
| 103 | + %out_buf3 = memref.cast %out_buf2 : memref<?xi32> to memref<*xi32> |
| 104 | + call @printMemrefI32(%out_buf3) : (memref<*xi32>) -> () |
| 105 | + return |
| 106 | + } |
| 107 | + func.func private @fillResource1DInt(%0 : memref<?xi32>, %1 : i32) |
| 108 | + func.func private @printMemrefI32(%ptr : memref<*xi32>) |
| 109 | +} |
0 commit comments