@@ -1963,7 +1963,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
   };
 
-  switch (II->getIntrinsicID()) {
+  Intrinsic::ID IID = II->getIntrinsicID();
+  switch (IID) {
   default: break;
   case Intrinsic::objectsize:
     if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
@@ -2046,14 +2047,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // Canonicalize funnel shift right by constant to funnel shift left. This
     // is not entirely arbitrary. For historical reasons, the backend may
     // recognize rotate left patterns but miss rotate right patterns.
-    if (II->getIntrinsicID() == Intrinsic::fshr) {
+    if (IID == Intrinsic::fshr) {
       // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
       Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
       Module *Mod = II->getModule();
       Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
       return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
     }
-    assert(II->getIntrinsicID() == Intrinsic::fshl &&
+    assert(IID == Intrinsic::fshl &&
            "All funnel shifts by simple constants should go left");
 
     // fshl(X, 0, C) --> shl X, C
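An aside on why the rewrite in this hunk is sound: the funnel-shift identity can be checked with a plain C++ sketch over 32-bit scalars (illustrative only; the helper names below are not from the patch).

#include <cstdint>

// fshl concatenates X:Y, shifts left by C, and keeps the high word;
// fshr shifts the same concatenation right by C and keeps the low word.
uint32_t fshl(uint32_t X, uint32_t Y, uint32_t C) {
  C %= 32;
  return C == 0 ? X : (X << C) | (Y >> (32 - C));
}
uint32_t fshr(uint32_t X, uint32_t Y, uint32_t C) {
  C %= 32;
  return C == 0 ? Y : (Y >> C) | (X << (32 - C));
}
// For any constant C with 1 <= C <= 31:
//   fshr(X, Y, C) == fshl(X, Y, 32 - C)
// which is exactly the rewrite fshr X, Y, C --> fshl X, Y, (BitWidth - C).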
@@ -2097,7 +2098,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     const APInt *C0, *C1;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    bool IsSigned = II->getIntrinsicID() == Intrinsic::sadd_with_overflow;
+    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
     bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
                              : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
     if (HasNWAdd && match(Arg1, m_APInt(C1))) {
@@ -2107,8 +2108,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       if (!Overflow)
         return replaceInstUsesWith(
             *II, Builder.CreateBinaryIntrinsic(
-                     II->getIntrinsicID(), X,
-                     ConstantInt::get(Arg1->getType(), NewC)));
+                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
     }
     break;
   }
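These two hunks touch the fold that reassociates constants through a no-wrap add: sadd.with.overflow(X +nsw C0, C1) becomes sadd.with.overflow(X, C0 + C1) whenever C0 + C1 itself does not overflow, which is what the !Overflow guard checks. A minimal scalar sketch of that precondition, assuming 32-bit signed constants (combineAddConstants is a hypothetical helper, not from the patch):

#include <cstdint>
#include <optional>

// Returns the combined constant C0 + C1 if it can be computed without
// signed overflow, mirroring the !Overflow guard above; otherwise the
// original two-instruction form must be kept.
std::optional<int32_t> combineAddConstants(int32_t C0, int32_t C1) {
  int64_t Sum = int64_t(C0) + int64_t(C1);
  if (Sum < INT32_MIN || Sum > INT32_MAX)
    return std::nullopt;
  return int32_t(Sum);
}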
@@ -2156,7 +2156,6 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ssub_sat: {
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    Intrinsic::ID IID = II->getIntrinsicID();
 
     // Make use of known overflow information.
     OverflowResult OR;
@@ -2208,7 +2207,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       APInt NewVal;
       bool IsUnsigned =
           IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
-      if (Other->getIntrinsicID() == II->getIntrinsicID() &&
+      if (Other->getIntrinsicID() == IID &&
           match(Arg1, m_APInt(Val)) &&
           match(Other->getArgOperand(0), m_Value(X)) &&
           match(Other->getArgOperand(1), m_APInt(Val2))) {
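This block merges a chain of two saturating ops with constant operands into one. An illustrative unsigned 32-bit analogue, assuming the usub_sat flavor of the fold (the same reasoning covers the other saturating variants handled in this case block):

#include <cstdint>

uint32_t usub_sat(uint32_t X, uint32_t C) { return X > C ? X - C : 0; }

// usub_sat(usub_sat(X, C2), C1) == usub_sat(X, C1 + C2), provided the
// constant addition C1 + C2 does not wrap.
uint32_t before(uint32_t X) { return usub_sat(usub_sat(X, 20), 10); }
uint32_t after(uint32_t X)  { return usub_sat(X, 30); }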
@@ -2243,7 +2242,6 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       return I;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    Intrinsic::ID IID = II->getIntrinsicID();
     Value *X, *Y;
     if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
         (Arg0->hasOneUse() || Arg1->hasOneUse())) {
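The m_FNeg matching above suggests this is the floating-point min/max case block; if so, the identity being exploited can be sketched with std::fmax/std::fmin, which follow the same IEEE maxNum/minNum semantics as llvm.maxnum/llvm.minnum (illustrative only):

#include <cmath>

// maxnum(-X, -Y) == -minnum(X, Y), and symmetrically for minnum; the
// one-use check in the code keeps the rewrite from adding instructions.
double before(double X, double Y) { return std::fmax(-X, -Y); }
double after(double X, double Y)  { return -std::fmin(X, Y); }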
@@ -2373,8 +2371,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     Value *ExtSrc;
     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
       // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
-      Value *NarrowII =
-          Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), ExtSrc, II);
+      Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
       return new FPExtInst(NarrowII, II->getType());
     }
     break;
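The narrowing here is sound when the wrapped intrinsic is exact on its input, so computing in the narrow type and extending afterwards cannot change the result. A float/double sketch, assuming this block serves rounding-style unary intrinsics such as floor/trunc (the case labels sit outside this hunk):

#include <cmath>

// floor(fpext x) == fpext(floor x): the floor of a float is exactly
// representable as a float, so no double rounding can occur.
double before(float X) { return std::floor(double(X)); }
double after(float X)  { return double(std::floor(X)); }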
@@ -2727,7 +2724,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     Value *Arg1 = II->getArgOperand(1);
 
     Value *V;
-    switch (II->getIntrinsicID()) {
+    switch (IID) {
     default: llvm_unreachable("Case stmts out of sync!");
     case Intrinsic::x86_avx512_add_ps_512:
     case Intrinsic::x86_avx512_add_pd_512:
@@ -2771,7 +2768,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
 
       Value *V;
-      switch (II->getIntrinsicID()) {
+      switch (IID) {
       default: llvm_unreachable("Case stmts out of sync!");
       case Intrinsic::x86_avx512_mask_add_ss_round:
       case Intrinsic::x86_avx512_mask_add_sd_round:
@@ -3363,8 +3360,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     }
 
     // Check for constant LHS & RHS - in this case we just simplify.
-    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
-                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
+    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
+                 IID == Intrinsic::aarch64_neon_umull);
     VectorType *NewVT = cast<VectorType>(II->getType());
     if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
       if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
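The Zext flag decides how the lanes are widened before the double-width multiply when both operands fold to constants. An illustrative scalar analogue of one lane (the 16-bit lane type is an assumption; vmull also handles other element widths):

#include <cstdint>

// Unsigned vmull zero-extends each lane before the widening multiply;
// the signed variants sign-extend instead.
uint32_t umull_lane(uint16_t A, uint16_t B) {
  return uint32_t(A) * uint32_t(B);
}
int32_t smull_lane(int16_t A, int16_t B) {
  return int32_t(A) * int32_t(B);
}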
@@ -3441,7 +3438,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     APFloat Significand = frexp(C->getValueAPF(), Exp,
                                 APFloat::rmNearestTiesToEven);
 
-    if (II->getIntrinsicID() == Intrinsic::amdgcn_frexp_mant) {
+    if (IID == Intrinsic::amdgcn_frexp_mant) {
       return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(),
                                                      Significand));
     }
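For a constant operand the mant/exp pair folds directly, in the same way libm frexp splits a value as mant * 2^exp with mant in [0.5, 1). A quick illustration (not from the patch):

#include <cmath>

// 8.0 == 0.5 * 2^4, so frexp yields mantissa 0.5 and exponent 4; the
// amdgcn frexp intrinsics constant-fold to one half of this pair.
void frexpExample() {
  int Exp;
  double Mant = std::frexp(8.0, &Exp); // Mant == 0.5, Exp == 4
  (void)Mant;
}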
@@ -3626,7 +3623,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       }
     }
 
-    bool Signed = II->getIntrinsicID() == Intrinsic::amdgcn_sbfe;
+    bool Signed = IID == Intrinsic::amdgcn_sbfe;
 
     if (!CWidth || !COffset)
       break;
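Signed selects between sign- and zero-filling when the extracted bitfield is materialized. A 32-bit scalar sketch of the two behaviors (illustrative; the hardware's masking of out-of-range offset/width operands is not modeled here):

#include <cstdint>

// Zero-filling extract: take Width bits starting at Offset
// (assumes 0 < Width and Offset + Width <= 32).
uint32_t ubfe(uint32_t V, unsigned Offset, unsigned Width) {
  return (V >> Offset) & ((Width < 32) ? ((1u << Width) - 1) : ~0u);
}
// Sign-filling extract: shift the field's top bit up to bit 31, then
// arithmetic-shift back down so the sign bit fills the upper bits.
int32_t sbfe(uint32_t V, unsigned Offset, unsigned Width) {
  return int32_t(V << (32 - Offset - Width)) >> (32 - Width);
}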
@@ -3659,7 +3656,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (EnBits == 0xf)
       break; // All inputs enabled.
 
-    bool IsCompr = II->getIntrinsicID() == Intrinsic::amdgcn_exp_compr;
+    bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
     bool Changed = false;
     for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
       if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
@@ -3747,7 +3744,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     const ConstantInt *CC = cast<ConstantInt>(II->getArgOperand(2));
     // Guard against invalid arguments.
     int64_t CCVal = CC->getZExtValue();
-    bool IsInteger = II->getIntrinsicID() == Intrinsic::amdgcn_icmp;
+    bool IsInteger = IID == Intrinsic::amdgcn_icmp;
     if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
                        CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
         (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
@@ -3930,14 +3927,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
         break;
       }
       if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
-        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
+        if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
           // If there is a stackrestore below this one, remove this one.
-          if (II->getIntrinsicID() == Intrinsic::stackrestore)
+          if (II2->getIntrinsicID() == Intrinsic::stackrestore)
             return eraseInstFromFunction(CI);
 
           // Bail if we cross over an intrinsic with side effects, such as
           // llvm.stacksave, llvm.read_register, or llvm.setjmp.
-          if (II->mayHaveSideEffects()) {
+          if (II2->mayHaveSideEffects()) {
            CannotRemove = true;
            break;
          }