
Commit b4a0766

AMDGPU/GlobalISel: Select llvm.amdgcn.buffer.atomic.cmpswap
1 parent 9109ccc commit b4a0766
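
For orientation, a minimal IR sketch of what this change selects (it mirrors the new test added at the end of this commit; the function name here is hypothetical, the declaration is the one from the test): the raw form takes the swap value, the compare value, the 128-bit buffer resource, a voffset, an soffset, and a cachepolicy immediate, and returns the value that was previously in memory.

  ; operand order: (%val, %cmp, %rsrc, %voffset, %soffset, %cachepolicy)
  declare i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32, i32, <4 x i32>, i32, i32, i32 immarg)

  define amdgpu_ps float @cmpswap_sketch(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
    %old = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
    %f = bitcast i32 %old to float
    ret float %f
  }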


5 files changed: +431, -14 lines


llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp

Lines changed: 4 additions & 1 deletion
@@ -2698,6 +2698,9 @@ static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
   case Intrinsic::amdgcn_raw_buffer_atomic_dec:
   case Intrinsic::amdgcn_struct_buffer_atomic_dec:
     return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC;
+  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
+  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
+    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP;
   default:
     llvm_unreachable("unhandled atomic opcode");
   }
@@ -2729,7 +2732,7 @@ bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
   const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
   Register VIndex;
   if (HasVIndex) {
-    VIndex = MI.getOperand(4).getReg();
+    VIndex = MI.getOperand(4 + OpOffset).getReg();
     ++OpOffset;
   }
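
The second hunk is the part cmpswap actually needs: the compare value adds one extra source operand ahead of the resource descriptor, so the vindex operand no longer sits at a fixed index and must be read at 4 + OpOffset. A minimal sketch of the operand shift using the struct forms (the struct swap/cmpswap declarations below are an assumption based on the intrinsic names in this patch, not text taken from the diff):

  declare i32 @llvm.amdgcn.struct.buffer.atomic.swap.i32(i32, <4 x i32>, i32, i32, i32, i32 immarg)
  declare i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32, i32, <4 x i32>, i32, i32, i32, i32 immarg)

  define amdgpu_ps float @struct_cmpswap_sketch(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
    ; swap:    (%val,       %rsrc, %vindex, %voffset, %soffset, cachepolicy)
    ; cmpswap: (%val, %cmp, %rsrc, %vindex, %voffset, %soffset, cachepolicy); the extra %cmp shifts the later operands by one
    %old = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
    %f = bitcast i32 %old to float
    ret float %f
  }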

llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp

Lines changed: 26 additions & 0 deletions
@@ -3159,6 +3159,32 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // initialized.
     break;
   }
+  case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP: {
+    // vdata_out
+    OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
+
+    // vdata_in
+    OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
+
+    // cmp
+    OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
+
+    // rsrc
+    OpdsMapping[3] = getSGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
+
+    // vindex
+    OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
+
+    // voffset
+    OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
+
+    // soffset
+    OpdsMapping[6] = getSGPROpMapping(MI.getOperand(6).getReg(), MRI, *TRI);
+
+    // Any remaining operands are immediates and were correctly null
+    // initialized.
+    break;
+  }
   case AMDGPU::G_INTRINSIC: {
     switch (MI.getIntrinsicID()) {
     default:

llvm/lib/Target/AMDGPU/BUFInstructions.td

Lines changed: 12 additions & 13 deletions
@@ -1443,14 +1443,13 @@ defm : BufferAtomicPatterns_NO_RTN<SIbuffer_atomic_pk_fadd, v2f16, "BUFFER_ATOMI
 
 def : GCNPat<
   (SIbuffer_atomic_cmpswap
-    i32:$data, i32:$cmp, v4i32:$rsrc, 0,
-    0, i32:$soffset, timm:$offset,
-    timm:$cachepolicy, 0),
+    i32:$data, i32:$cmp, v4i32:$rsrc, 0, 0, i32:$soffset,
+    timm:$offset, timm:$cachepolicy, 0),
   (EXTRACT_SUBREG
     (BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN
-      (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
-      $rsrc, $soffset, (as_i16imm $offset), (extract_slc $cachepolicy)),
-    sub0)
+      (REG_SEQUENCE VReg_64, VGPR_32:$data, sub0, VGPR_32:$cmp, sub1),
+      SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset),
+      (extract_slc $cachepolicy)), sub0)
 >;
 
 def : GCNPat<
@@ -1460,8 +1459,8 @@ def : GCNPat<
     timm:$cachepolicy, timm),
   (EXTRACT_SUBREG
     (BUFFER_ATOMIC_CMPSWAP_IDXEN_RTN
-      (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
-      $vindex, $rsrc, $soffset, (as_i16imm $offset), (extract_slc $cachepolicy)),
+      (REG_SEQUENCE VReg_64, VGPR_32:$data, sub0, VGPR_32:$cmp, sub1),
+      VGPR_32:$vindex, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (extract_slc $cachepolicy)),
     sub0)
 >;
 
@@ -1472,8 +1471,8 @@ def : GCNPat<
     timm:$cachepolicy, 0),
   (EXTRACT_SUBREG
     (BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN
-      (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
-      $voffset, $rsrc, $soffset, (as_i16imm $offset), (extract_slc $cachepolicy)),
+      (REG_SEQUENCE VReg_64, VGPR_32:$data, sub0, VGPR_32:$cmp, sub1),
+      VGPR_32:$voffset, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (extract_slc $cachepolicy)),
     sub0)
 >;
 
@@ -1484,9 +1483,9 @@ def : GCNPat<
     timm:$cachepolicy, timm),
   (EXTRACT_SUBREG
     (BUFFER_ATOMIC_CMPSWAP_BOTHEN_RTN
-      (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
-      (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
-      $rsrc, $soffset, (as_i16imm $offset), (extract_slc $cachepolicy)),
+      (REG_SEQUENCE VReg_64, VGPR_32:$data, sub0, VGPR_32:$cmp, sub1),
+      (REG_SEQUENCE VReg_64, VGPR_32:$vindex, sub0, VGPR_32:$voffset, sub1),
+      SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (extract_slc $cachepolicy)),
     sub0)
 >;
 

New GlobalISel instruction-selection test for llvm.amdgcn.raw.buffer.atomic.cmpswap

Lines changed: 195 additions & 0 deletions
@@ -0,0 +1,195 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK: [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
  ; CHECK: $vgpr0 = COPY [[COPY8]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

; Natural mapping
define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK: [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
  ; CHECK: S_ENDPGM 0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; All operands need regbank legalization
define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset(i32 inreg %val, i32 inreg %cmp, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
  ; CHECK: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
  ; CHECK: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY11]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY12]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
  ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
  ; CHECK: [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY10]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: $vgpr0 = COPY [[COPY13]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}

; All operands need regbank legalization
define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset(i32 inreg %val, i32 inreg %cmp, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
  ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
  ; CHECK: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
  ; CHECK: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY11]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY12]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
  ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
  ; CHECK: [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY10]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: S_ENDPGM 0
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK: [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 0, implicit $exec :: (volatile dereferenceable load store 4 on custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
  ; CHECK: $vgpr0 = COPY [[COPY8]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %voffset = add i32 %voffset.base, 4095
  %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %ret to float
  ret float %cast
}


; FIXME: 64-bit not handled
; ; Natural mapping
; define amdgpu_ps <2 x float> @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i64 %val, i64 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
;   %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   %cast = bitcast i64 %ret to <2 x float>
;   ret <2 x float> %cast
; }

; define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i64 %val, i64 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
;   %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   ret void
; }

declare i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32, i32, <4 x i32>, i32, i32, i32 immarg) #0
declare i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64, i64, <4 x i32>, i32, i32, i32 immarg) #0

attributes #0 = { nounwind }
