@@ -145,7 +145,7 @@ void test_dbg(void) {
// AArch32-NEXT: [[LDREX_I:%.*]] = call i32 @llvm.arm.ldrex.p0(ptr elementtype(i32) [[P:%.*]])
// AArch32-NEXT: [[STREX_I:%.*]] = call i32 @llvm.arm.strex.p0(i32 [[X:%.*]], ptr elementtype(i32) [[P]])
// AArch32-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STREX_I]], 0
- // AArch32-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP7:![0-9]+]]
+ // AArch32-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
// AArch32: __swp.exit:
// AArch32-NEXT: ret void
//
@@ -154,11 +154,11 @@ void test_dbg(void) {
// AArch64-NEXT: br label [[DO_BODY_I:%.*]]
// AArch64: do.body.i:
// AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) [[P:%.*]])
- // AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[LDXR_I]] to i32
- // AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[X:%.*]] to i64
- // AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP2]], ptr elementtype(i32) [[P]])
+ // AArch64-NEXT: [[TMP0:%.*]] = trunc i64 [[LDXR_I]] to i32
+ // AArch64-NEXT: [[TMP1:%.*]] = zext i32 [[X:%.*]] to i64
+ // AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP1]], ptr elementtype(i32) [[P]])
// AArch64-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STXR_I]], 0
- // AArch64-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
+ // AArch64-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]]
// AArch64: __swp.exit:
// AArch64-NEXT: ret void
//
@@ -484,17 +484,17 @@ uint32_t test_rev16(uint32_t t) {
// AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]])
// AArch64-NEXT: [[REM_I_I10_I:%.*]] = urem i32 16, 32
// AArch64-NEXT: [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
- // AArch64-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
- // AArch64: if.then.i.i12.i:
+ // AArch64-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I17_I:%.*]], label [[IF_END_I_I12_I:%.*]]
+ // AArch64: if.then.i.i17.i:
// AArch64-NEXT: br label [[__REV16_EXIT18_I:%.*]]
- // AArch64: if.end.i.i17.i:
+ // AArch64: if.end.i.i12.i:
// AArch64-NEXT: [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I10_I]]
// AArch64-NEXT: [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
// AArch64-NEXT: [[SHL_I_I15_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I14_I]]
// AArch64-NEXT: [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
// AArch64-NEXT: br label [[__REV16_EXIT18_I]]
// AArch64: __rev16.exit18.i:
- // AArch64-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
+ // AArch64-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I17_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I12_I]] ]
// AArch64-NEXT: [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I6_I_0]] to i64
// AArch64-NEXT: [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
// AArch64-NEXT: [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
@@ -527,17 +527,17 @@ long test_rev16l(long t) {
// ARM-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]])
// ARM-NEXT: [[REM_I_I10_I:%.*]] = urem i32 16, 32
// ARM-NEXT: [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
- // ARM-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
- // ARM: if.then.i.i12.i:
+ // ARM-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I17_I:%.*]], label [[IF_END_I_I12_I:%.*]]
+ // ARM: if.then.i.i17.i:
// ARM-NEXT: br label [[__REV16_EXIT18_I:%.*]]
- // ARM: if.end.i.i17.i:
+ // ARM: if.end.i.i12.i:
// ARM-NEXT: [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I10_I]]
// ARM-NEXT: [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
// ARM-NEXT: [[SHL_I_I15_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I14_I]]
// ARM-NEXT: [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
// ARM-NEXT: br label [[__REV16_EXIT18_I]]
// ARM: __rev16.exit18.i:
- // ARM-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
+ // ARM-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I17_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I12_I]] ]
// ARM-NEXT: [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I6_I_0]] to i64
// ARM-NEXT: [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
// ARM-NEXT: [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
@@ -662,7 +662,7 @@ int32_t test_qsub(int32_t a, int32_t b) {
extern int32_t f();
// AArch32-LABEL: @test_qdbl(
// AArch32-NEXT: entry:
- // AArch32-NEXT: [[CALL:%.*]] = call i32 @f() #[[ATTR7:[0-9]+]]
+ // AArch32-NEXT: [[CALL:%.*]] = call i32 @f() #[[ATTR9:[0-9]+]]
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[CALL]], i32 [[CALL]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
@@ -1456,12 +1456,12 @@ uint32_t test_crc32cd(uint32_t a, uint64_t b) {
/* 10.1 Special register intrinsics */
// AArch32-LABEL: @test_rsr(
// AArch32-NEXT: entry:
- // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META9:![0-9]+]])
+ // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META5:![0-9]+]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
// AArch64-LABEL: @test_rsr(
// AArch64-NEXT: entry:
- // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8:![0-9]+]])
+ // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META4:![0-9]+]])
// AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// AArch64-NEXT: ret i32 [[TMP1]]
//
@@ -1475,12 +1475,12 @@ uint32_t test_rsr() {

// AArch32-LABEL: @test_rsr64(
// AArch32-NEXT: entry:
- // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META10:![0-9]+]])
+ // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META6:![0-9]+]])
// AArch32-NEXT: ret i64 [[TMP0]]
//
// AArch64-LABEL: @test_rsr64(
// AArch64-NEXT: entry:
- // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
+ // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META4]])
// AArch64-NEXT: ret i64 [[TMP0]]
//
uint64_t test_rsr64() {
@@ -1494,7 +1494,7 @@ uint64_t test_rsr64() {
#ifdef __ARM_FEATURE_SYSREG128
// AArch6494D128-LABEL: @test_rsr128(
// AArch6494D128-NEXT: entry:
- // AArch6494D128-NEXT: [[TMP0:%.*]] = call i128 @llvm.read_volatile_register.i128(metadata [[META8]])
+ // AArch6494D128-NEXT: [[TMP0:%.*]] = call i128 @llvm.read_volatile_register.i128(metadata [[META4]])
// AArch6494D128-NEXT: ret i128 [[TMP0]]
//
__uint128_t test_rsr128() {
@@ -1504,13 +1504,13 @@ __uint128_t test_rsr128() {

// AArch32-LABEL: @test_rsrp(
// AArch32-NEXT: entry:
- // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META11:![0-9]+]])
+ // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META7:![0-9]+]])
// AArch32-NEXT: [[TMP1:%.*]] = inttoptr i32 [[TMP0]] to ptr
// AArch32-NEXT: ret ptr [[TMP1]]
//
// AArch64-LABEL: @test_rsrp(
// AArch64-NEXT: entry:
- // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META9:![0-9]+]])
+ // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META5:![0-9]+]])
// AArch64-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr
// AArch64-NEXT: ret ptr [[TMP1]]
//
@@ -1520,13 +1520,13 @@ void *test_rsrp() {

// AArch32-LABEL: @test_wsr(
// AArch32-NEXT: entry:
- // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META9]], i32 [[V:%.*]])
+ // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META5]], i32 [[V:%.*]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsr(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[TMP0:%.*]] = zext i32 [[V:%.*]] to i64
- // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP0]])
+ // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META4]], i64 [[TMP0]])
// AArch64-NEXT: ret void
//
void test_wsr(uint32_t v) {
@@ -1539,12 +1539,12 @@ void test_wsr(uint32_t v) {

// AArch32-LABEL: @test_wsr64(
// AArch32-NEXT: entry:
- // AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META10]], i64 [[V:%.*]])
+ // AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META6]], i64 [[V:%.*]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsr64(
// AArch64-NEXT: entry:
- // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[V:%.*]])
+ // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META4]], i64 [[V:%.*]])
// AArch64-NEXT: ret void
//
void test_wsr64(uint64_t v) {
@@ -1558,7 +1558,7 @@ void test_wsr64(uint64_t v) {
#ifdef __ARM_FEATURE_SYSREG128
// AArch6494D128-LABEL: @test_wsr128(
// AArch6494D128-NEXT: entry:
- // AArch6494D128-NEXT: call void @llvm.write_register.i128(metadata [[META8]], i128 [[V:%.*]])
+ // AArch6494D128-NEXT: call void @llvm.write_register.i128(metadata [[META4]], i128 [[V:%.*]])
// AArch6494D128-NEXT: ret void
//
void test_wsr128(__uint128_t v) {
@@ -1570,13 +1570,13 @@ void test_wsr128(__uint128_t v) {
// AArch32-LABEL: @test_wsrp(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[V:%.*]] to i32
- // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META11]], i32 [[TMP0]])
+ // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META7]], i32 [[TMP0]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsrp(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[V:%.*]] to i64
- // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META9]], i64 [[TMP0]])
+ // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META5]], i64 [[TMP0]])
// AArch64-NEXT: ret void
//
void test_wsrp(void *v) {
@@ -1586,19 +1586,19 @@ void test_wsrp(void *v) {
// AArch32-LABEL: @test_rsrf(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
- // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META9]])
+ // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META5]])
// AArch32-NEXT: store i32 [[TMP0]], ptr [[REF_TMP]], align 4
- // AArch32-NEXT: [[TMP2:%.*]] = load float, ptr [[REF_TMP]], align 4
- // AArch32-NEXT: ret float [[TMP2]]
+ // AArch32-NEXT: [[TMP1:%.*]] = load float, ptr [[REF_TMP]], align 4
+ // AArch32-NEXT: ret float [[TMP1]]
//
// AArch64-LABEL: @test_rsrf(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
- // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
+ // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META4]])
// AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// AArch64-NEXT: store i32 [[TMP1]], ptr [[REF_TMP]], align 4
- // AArch64-NEXT: [[TMP3:%.*]] = load float, ptr [[REF_TMP]], align 4
- // AArch64-NEXT: ret float [[TMP3]]
+ // AArch64-NEXT: [[TMP2:%.*]] = load float, ptr [[REF_TMP]], align 4
+ // AArch64-NEXT: ret float [[TMP2]]
//
float test_rsrf() {
#ifdef __ARM_32BIT_STATE
@@ -1611,18 +1611,18 @@ float test_rsrf() {
// AArch32-LABEL: @test_rsrf64(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[REF_TMP:%.*]] = alloca i64, align 8
- // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META10]])
+ // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META6]])
// AArch32-NEXT: store i64 [[TMP0]], ptr [[REF_TMP]], align 8
- // AArch32-NEXT: [[TMP2:%.*]] = load double, ptr [[REF_TMP]], align 8
- // AArch32-NEXT: ret double [[TMP2]]
+ // AArch32-NEXT: [[TMP1:%.*]] = load double, ptr [[REF_TMP]], align 8
+ // AArch32-NEXT: ret double [[TMP1]]
//
// AArch64-LABEL: @test_rsrf64(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[REF_TMP:%.*]] = alloca i64, align 8
- // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
+ // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META4]])
// AArch64-NEXT: store i64 [[TMP0]], ptr [[REF_TMP]], align 8
- // AArch64-NEXT: [[TMP2:%.*]] = load double, ptr [[REF_TMP]], align 8
- // AArch64-NEXT: ret double [[TMP2]]
+ // AArch64-NEXT: [[TMP1:%.*]] = load double, ptr [[REF_TMP]], align 8
+ // AArch64-NEXT: ret double [[TMP1]]
//
double test_rsrf64() {
#ifdef __ARM_32BIT_STATE
@@ -1636,17 +1636,17 @@ double test_rsrf64() {
// AArch32-NEXT: entry:
// AArch32-NEXT: [[V_ADDR:%.*]] = alloca float, align 4
// AArch32-NEXT: store float [[V:%.*]], ptr [[V_ADDR]], align 4
- // AArch32-NEXT: [[TMP1:%.*]] = load i32, ptr [[V_ADDR]], align 4
- // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META9]], i32 [[TMP1]])
+ // AArch32-NEXT: [[TMP0:%.*]] = load i32, ptr [[V_ADDR]], align 4
+ // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META5]], i32 [[TMP0]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsrf(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[V_ADDR:%.*]] = alloca float, align 4
// AArch64-NEXT: store float [[V:%.*]], ptr [[V_ADDR]], align 4
- // AArch64-NEXT: [[TMP1:%.*]] = load i32, ptr [[V_ADDR]], align 4
- // AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
- // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP2]])
+ // AArch64-NEXT: [[TMP0:%.*]] = load i32, ptr [[V_ADDR]], align 4
+ // AArch64-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+ // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META4]], i64 [[TMP1]])
// AArch64-NEXT: ret void
//
void test_wsrf(float v) {
@@ -1661,16 +1661,16 @@ void test_wsrf(float v) {
// AArch32-NEXT: entry:
// AArch32-NEXT: [[V_ADDR:%.*]] = alloca double, align 8
// AArch32-NEXT: store double [[V:%.*]], ptr [[V_ADDR]], align 8
- // AArch32-NEXT: [[TMP1:%.*]] = load i64, ptr [[V_ADDR]], align 8
- // AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META10]], i64 [[TMP1]])
+ // AArch32-NEXT: [[TMP0:%.*]] = load i64, ptr [[V_ADDR]], align 8
+ // AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META6]], i64 [[TMP0]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsrf64(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[V_ADDR:%.*]] = alloca double, align 8
// AArch64-NEXT: store double [[V:%.*]], ptr [[V_ADDR]], align 8
- // AArch64-NEXT: [[TMP1:%.*]] = load i64, ptr [[V_ADDR]], align 8
- // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP1]])
+ // AArch64-NEXT: [[TMP0:%.*]] = load i64, ptr [[V_ADDR]], align 8
+ // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META4]], i64 [[TMP0]])
// AArch64-NEXT: ret void
//
void test_wsrf64(double v) {