@@ -5595,11 +5595,11 @@ define i64 @trunc_with_first_order_recurrence() {
5595
5595
; CHECK-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
5596
5596
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
5597
5597
; CHECK-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
5598
- ; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5598
+ ; CHECK-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5599
5599
; CHECK-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
5600
5600
; CHECK-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
5601
5601
; CHECK-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
5602
- ; CHECK-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
5602
+ ; CHECK-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
5603
5603
; CHECK-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
5604
5604
; CHECK-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
5605
5605
; CHECK-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5657,11 +5657,11 @@ define i64 @trunc_with_first_order_recurrence() {
5657
5657
; IND-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
5658
5658
; IND-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5659
5659
; IND-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5660
- ; IND-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5660
+ ; IND-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5661
5661
; IND-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
5662
5662
; IND-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
5663
5663
; IND-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
5664
- ; IND-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
5664
+ ; IND-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
5665
5665
; IND-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
5666
5666
; IND-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
5667
5667
; IND-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5735,11 +5735,11 @@ define i64 @trunc_with_first_order_recurrence() {
5735
5735
; UNROLL-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
5736
5736
; UNROLL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5737
5737
; UNROLL-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5738
- ; UNROLL-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5738
+ ; UNROLL-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5739
5739
; UNROLL-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
5740
5740
; UNROLL-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
5741
5741
; UNROLL-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
5742
- ; UNROLL-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
5742
+ ; UNROLL-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
5743
5743
; UNROLL-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
5744
5744
; UNROLL-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
5745
5745
; UNROLL-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5815,11 +5815,11 @@ define i64 @trunc_with_first_order_recurrence() {
5815
5815
; UNROLL-NO-IC-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
5816
5816
; UNROLL-NO-IC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
5817
5817
; UNROLL-NO-IC-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
5818
- ; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5818
+ ; UNROLL-NO-IC-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5819
5819
; UNROLL-NO-IC-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
5820
5820
; UNROLL-NO-IC-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
5821
5821
; UNROLL-NO-IC-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
5822
- ; UNROLL-NO-IC-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
5822
+ ; UNROLL-NO-IC-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
5823
5823
; UNROLL-NO-IC-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
5824
5824
; UNROLL-NO-IC-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
5825
5825
; UNROLL-NO-IC-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5893,11 +5893,11 @@ define i64 @trunc_with_first_order_recurrence() {
5893
5893
; INTERLEAVE-NEXT: [[C5:%.*]] = phi i64 [ [[C23]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
5894
5894
; INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5895
5895
; INTERLEAVE-NEXT: [[X:%.*]] = phi i32 [ [[C24:%.*]], [[LOOP]] ], [ 113, [[SCALAR_PH]] ]
5896
- ; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5896
+ ; INTERLEAVE-NEXT: [[Y:%.*]] = phi i32 [ [[C6:%.*]], [[LOOP]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
5897
5897
; INTERLEAVE-NEXT: [[C6]] = trunc i64 [[INDVARS_IV]] to i32
5898
5898
; INTERLEAVE-NEXT: [[C8:%.*]] = mul i32 [[X]], [[C6]]
5899
5899
; INTERLEAVE-NEXT: [[C9:%.*]] = add i32 [[C8]], 42
5900
- ; INTERLEAVE-NEXT: [[C10:%.*]] = add i32 [[SCALAR_RECUR]], [[C6]]
5900
+ ; INTERLEAVE-NEXT: [[C10:%.*]] = add i32 [[Y]], [[C6]]
5901
5901
; INTERLEAVE-NEXT: [[C11:%.*]] = add i32 [[C10]], [[C9]]
5902
5902
; INTERLEAVE-NEXT: [[C12:%.*]] = sext i32 [[C11]] to i64
5903
5903
; INTERLEAVE-NEXT: [[C13:%.*]] = add i64 [[C5]], [[C12]]
@@ -5980,9 +5980,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
5980
5980
; CHECK: loop:
5981
5981
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
5982
5982
; CHECK-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
5983
- ; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
5983
+ ; CHECK-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
5984
5984
; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
5985
- ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
5985
+ ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
5986
5986
; CHECK-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
5987
5987
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
5988
5988
; CHECK-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6107,9 +6107,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
6107
6107
; UNROLL-NO-IC: loop:
6108
6108
; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
6109
6109
; UNROLL-NO-IC-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
6110
- ; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
6110
+ ; UNROLL-NO-IC-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
6111
6111
; UNROLL-NO-IC-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
6112
- ; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
6112
+ ; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
6113
6113
; UNROLL-NO-IC-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
6114
6114
; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
6115
6115
; UNROLL-NO-IC-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6159,9 +6159,9 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
6159
6159
; INTERLEAVE: loop:
6160
6160
; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 96, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
6161
6161
; INTERLEAVE-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 96, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ]
6162
- ; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
6162
+ ; INTERLEAVE-NEXT: [[RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ]
6163
6163
; INTERLEAVE-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4
6164
- ; INTERLEAVE-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[SCALAR_RECUR]]
6164
+ ; INTERLEAVE-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]]
6165
6165
; INTERLEAVE-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1
6166
6166
; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
6167
6167
; INTERLEAVE-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32
@@ -6265,13 +6265,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
6265
6265
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY]] ]
6266
6266
; CHECK-NEXT: br label [[LOOP:%.*]]
6267
6267
; CHECK: loop:
6268
- ; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6268
+ ; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6269
6269
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
6270
6270
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
6271
6271
; CHECK-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
6272
6272
; CHECK-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
6273
6273
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
6274
- ; CHECK-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
6274
+ ; CHECK-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
6275
6275
; CHECK-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
6276
6276
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
6277
6277
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6336,13 +6336,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
6336
6336
; IND-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
6337
6337
; IND-NEXT: br label [[LOOP:%.*]]
6338
6338
; IND: loop:
6339
- ; IND-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6339
+ ; IND-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6340
6340
; IND-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
6341
6341
; IND-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
6342
6342
; IND-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
6343
6343
; IND-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
6344
6344
; IND-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
6345
- ; IND-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
6345
+ ; IND-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
6346
6346
; IND-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
6347
6347
; IND-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
6348
6348
; IND-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6411,13 +6411,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
6411
6411
; UNROLL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
6412
6412
; UNROLL-NEXT: br label [[LOOP:%.*]]
6413
6413
; UNROLL: loop:
6414
- ; UNROLL-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6414
+ ; UNROLL-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6415
6415
; UNROLL-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
6416
6416
; UNROLL-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
6417
6417
; UNROLL-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
6418
6418
; UNROLL-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
6419
6419
; UNROLL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
6420
- ; UNROLL-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
6420
+ ; UNROLL-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
6421
6421
; UNROLL-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
6422
6422
; UNROLL-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
6423
6423
; UNROLL-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6494,13 +6494,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
6494
6494
; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY]] ]
6495
6495
; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]]
6496
6496
; UNROLL-NO-IC: loop:
6497
- ; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6497
+ ; UNROLL-NO-IC-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6498
6498
; UNROLL-NO-IC-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
6499
6499
; UNROLL-NO-IC-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
6500
6500
; UNROLL-NO-IC-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
6501
6501
; UNROLL-NO-IC-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
6502
6502
; UNROLL-NO-IC-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
6503
- ; UNROLL-NO-IC-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
6503
+ ; UNROLL-NO-IC-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
6504
6504
; UNROLL-NO-IC-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
6505
6505
; UNROLL-NO-IC-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
6506
6506
; UNROLL-NO-IC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
@@ -6569,13 +6569,13 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
6569
6569
; INTERLEAVE-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
6570
6570
; INTERLEAVE-NEXT: br label [[LOOP:%.*]]
6571
6571
; INTERLEAVE: loop:
6572
- ; INTERLEAVE-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6572
+ ; INTERLEAVE-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV_2_CONV:%.*]], [[LOOP]] ]
6573
6573
; INTERLEAVE-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
6574
6574
; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
6575
6575
; INTERLEAVE-NEXT: [[IV_2_EXT:%.*]] = shl i32 [[IV_2]], 24
6576
6576
; INTERLEAVE-NEXT: [[IV_2_CONV]] = ashr exact i32 [[IV_2_EXT]], 24
6577
6577
; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[IV_1]]
6578
- ; INTERLEAVE-NEXT: store i32 [[SCALAR_RECUR]], ptr [[GEP]], align 4
6578
+ ; INTERLEAVE-NEXT: store i32 [[FOR]], ptr [[GEP]], align 4
6579
6579
; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nsw i32 [[IV_2_CONV]], [[STEP]]
6580
6580
; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
6581
6581
; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
0 commit comments