; 64 storage locations are sufficient for all current-generation NVIDIA GPUs
; 64 bits per warp is sufficient for all fundamental data types
; Reducing storage for small data types or increasing it for user-defined types
; will likely require an additional pass to track group algorithm usage
@__clc__group_scratch = internal addrspace(3) global [64 x i64] undef, align 1
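; The buffer above is 64 * 8 = 512 bytes of addrspace(3) (shared) memory per
; work-group. Every getter below returns a pointer to its first element,
; reinterpreted at the requested element type, so the typed views all alias
; one another; presumably callers must separate uses of the scratch space at
; different types with a work-group barrier.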

define i8 addrspace(3)* @__clc__get_group_scratch_bool() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to i8 addrspace(3)*
  ret i8 addrspace(3)* %cast
}

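; The bool and char getters return the same i8 view of the buffer; bool
; scratch is presumably stored one byte per element, like char.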
define i8 addrspace(3)* @__clc__get_group_scratch_char() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to i8 addrspace(3)*
  ret i8 addrspace(3)* %cast
}

define i16 addrspace(3)* @__clc__get_group_scratch_short() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to i16 addrspace(3)*
  ret i16 addrspace(3)* %cast
}

define i32 addrspace(3)* @__clc__get_group_scratch_int() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to i32 addrspace(3)*
  ret i32 addrspace(3)* %cast
}

define i64 addrspace(3)* @__clc__get_group_scratch_long() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to i64 addrspace(3)*
  ret i64 addrspace(3)* %cast
}

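; Floating-point views of the same storage. A double occupies one full 64-bit
; slot, matching the "64 bits per warp" sizing noted at the top of the file.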
define half addrspace(3)* @__clc__get_group_scratch_half() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to half addrspace(3)*
  ret half addrspace(3)* %cast
}

define float addrspace(3)* @__clc__get_group_scratch_float() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to float addrspace(3)*
  ret float addrspace(3)* %cast
}

define double addrspace(3)* @__clc__get_group_scratch_double() nounwind alwaysinline {
entry:
  %ptr = getelementptr inbounds [64 x i64], [64 x i64] addrspace(3)* @__clc__group_scratch, i64 0, i64 0
  %cast = bitcast i64 addrspace(3)* %ptr to double addrspace(3)*
  ret double addrspace(3)* %cast
}
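
; A minimal usage sketch (not part of the library): a hypothetical group
; reduction step in which each warp's leader publishes a 32-bit partial
; result into the scratch slot indexed by its warp ID. The function name
; @__clc__example_publish_partial, the hard-coded warp size of 32, and the
; choice of NVVM tid/barrier intrinsics are illustrative assumptions, not
; part of this file's contract.
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
declare void @llvm.nvvm.barrier0()

define void @__clc__example_publish_partial(i32 %partial) nounwind {
entry:
  %base = call i32 addrspace(3)* @__clc__get_group_scratch_int()
  %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
  %lane = and i32 %tid, 31                      ; lane index within the warp
  %warp = lshr i32 %tid, 5                      ; warp ID = tid / 32
  %is_leader = icmp eq i32 %lane, 0
  br i1 %is_leader, label %publish, label %sync

publish:                                        ; warp leader stores its value
  %idx = zext i32 %warp to i64
  %slot = getelementptr inbounds i32, i32 addrspace(3)* %base, i64 %idx
  store i32 %partial, i32 addrspace(3)* %slot
  br label %sync

sync:                                           ; make the stores group-visible
  call void @llvm.nvvm.barrier0()
  ret void
}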