//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <numeric>
#include <optional>
#define DEBUG_TYPE "globalisel-utils"
using namespace llvm;
using namespace MIPatternMatch;
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);
  return Reg;
}
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");
  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
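  // Concretely (from the two branches below): for a use operand this emits
  // "ConstrainedReg = COPY Reg" before InsertPt, while for a def it places
  // "Reg = COPY ConstrainedReg" immediately after InsertPt.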
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");
  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose
  // any register class constraints on some of their operands: if it's a use,
  // we can skip constraining as the instruction defining the register would
  // constrain it.
  if (OpRC) {
    // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
    // can have multiple regbanks for a superclass that combine different
    // register types (e.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
    // resolved by targets during regbankselect should not be overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;
    OpRC = TRI.getAllocatableClass(OpRC);
  }
  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);
    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;
    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");
    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;
    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;
    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;
  // Otherwise match if the Src is already a regclass that is covered by the Dst
  // RegBank.
  return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
         cast<const RegisterBank *>(DstRBC)->covers(
             *MRI.getRegClassOrNull(SrcReg));
}
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return MI.wouldBeTriviallyDead();
}
static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();
  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}
void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}
unsigned llvm::getInverseGMinMaxOpcode(unsigned MinMaxOpc) {
  switch (MinMaxOpc) {
  case TargetOpcode::G_SMIN:
    return TargetOpcode::G_SMAX;
  case TargetOpcode::G_SMAX:
    return TargetOpcode::G_SMIN;
  case TargetOpcode::G_UMIN:
    return TargetOpcode::G_UMAX;
  case TargetOpcode::G_UMAX:
    return TargetOpcode::G_UMIN;
  default:
    llvm_unreachable("unrecognized opcode");
  }
}
std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return std::nullopt;
  return ValAndVReg->Value;
}
const APInt &llvm::getIConstantFromReg(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *Const = MRI.getVRegDef(Reg);
  assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
         "expected a G_CONSTANT on Reg");
  return Const->getOperand(1).getCImm()->getValue();
}
std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}
namespace {
// This function is used in many places, and as such, it has some
// micro-optimizations to try and make it as fast as it can be.
//
// - We use template arguments to avoid an indirect call caused by passing a
// function_ref/std::function
// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
// Instead it returns true/false and places the result in a pre-constructed
// APInt.
//
// Please change this function carefully and benchmark your changes.
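//
// Illustrative look-through (hypothetical GMIR): given
//   %c:_(s32) = G_CONSTANT i32 -1
//   %t:_(s16) = G_TRUNC %c
// a query on %t records the G_TRUNC, walks to the G_CONSTANT, and then
// replays the truncation on the APInt, returning the 16-bit value -1 paired
// with %c's vreg.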
template <bool (*IsConstantOpcode)(const MachineInstr *),
          bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
std::optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
                                  bool LookThroughInstrs = true,
                                  bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;
  APInt Val;
  if (!GetAPCstValue(MI, Val))
    return std::nullopt;
  for (auto &Pair : reverse(SeenOpcodes)) {
    switch (Pair.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Pair.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Pair.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Pair.second);
      break;
    }
  }
  return ValueAndVReg{std::move(Val), VReg};
}
bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}
bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}
bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}
bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (!CstVal.isCImm())
    return false;
  Result = CstVal.getCImm()->getValue();
  return true;
}
bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    Result = CstVal.getCImm()->getValue();
  else if (CstVal.isFPImm())
    Result = CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  else
    return false;
  return true;
}
} // end anonymous namespace
std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
      VReg, MRI, LookThroughInstrs);
}
std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough<isAnyConstant,
                                           getCImmOrFPImmAsAPInt>(
      VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
}
std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg =
      getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
          VReg, MRI, LookThroughInstrs);
  if (!Reg)
    return std::nullopt;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}
const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}
std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}
MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}
Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}
void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  for (int i = 0; i < NumParts; ++i)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}
bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                        SmallVectorImpl<Register> &VRegs,
                        SmallVectorImpl<Register> &LeftoverRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  assert(!LeftoverTy.isValid() && "this is an out argument");
  unsigned RegSize = RegTy.getSizeInBits();
  unsigned MainSize = MainTy.getSizeInBits();
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;
  // Use an unmerge when possible.
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
    MIRBuilder.buildUnmerge(VRegs, Reg);
    return true;
  }
  // Try to use unmerge for an irregular vector split where possible.
  // For example, when splitting a <6 x i32> into <4 x i32> with a <2 x i32>
  // leftover, it becomes:
  //  <2 x i32> %2, <2 x i32> %3, <2 x i32> %4 = G_UNMERGE_VALUES <6 x i32> %1
  //  <4 x i32> %5 = G_CONCAT_VECTORS <2 x i32> %2, <2 x i32> %3
  if (RegTy.isVector() && MainTy.isVector()) {
    unsigned RegNumElts = RegTy.getNumElements();
    unsigned MainNumElts = MainTy.getNumElements();
    unsigned LeftoverNumElts = RegNumElts % MainNumElts;
    // If we can unmerge to LeftoverTy, do so.
    if (MainNumElts % LeftoverNumElts == 0 &&
        RegNumElts % LeftoverNumElts == 0 &&
        RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
        LeftoverNumElts > 1) {
      LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());
      // Unmerge the SrcReg into LeftoverTy vectors.
      SmallVector<Register, 4> UnmergeValues;
      extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
                   MIRBuilder, MRI);
      // Find how many LeftoverTy pieces make up one MainTy.
      unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
      unsigned NumOfLeftoverVal =
          ((RegNumElts % MainNumElts) / LeftoverNumElts);
      // Create as many MainTy values as possible from the unmerged pieces.
      SmallVector<Register, 4> MergeValues;
      for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
        MergeValues.push_back(UnmergeValues[I]);
        if (MergeValues.size() == LeftoverPerMain) {
          VRegs.push_back(
              MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
          MergeValues.clear();
        }
      }
      // Populate LeftoverRegs with the leftovers
      for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
           I < UnmergeValues.size(); I++) {
        LeftoverRegs.push_back(UnmergeValues[I]);
      }
      return true;
    }
  }
  // Perform an irregular split. The leftover is the last element of RegPieces.
  if (MainTy.isVector()) {
    SmallVector<Register, 8> RegPieces;
    extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
                       MRI);
    for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
      VRegs.push_back(RegPieces[i]);
    LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
    LeftoverTy = MRI.getType(LeftoverRegs[0]);
    return true;
  }
  LeftoverTy = LLT::scalar(LeftoverSize);
  // For irregular sizes, extract the individual parts.
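  // For example (hypothetical sizes): splitting s70 with a s32 main type
  // extracts two s32 pieces at bit offsets 0 and 32, then a single s6
  // leftover at offset 64.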
  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    VRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
  }
  for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
       Offset += LeftoverSize) {
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
    LeftoverRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, Offset);
  }
  return true;
}
void llvm::extractVectorParts(Register Reg, unsigned NumElts,
                              SmallVectorImpl<Register> &VRegs,
                              MachineIRBuilder &MIRBuilder,
                              MachineRegisterInfo &MRI) {
  LLT RegTy = MRI.getType(Reg);
  assert(RegTy.isVector() && "Expected a vector type");
  LLT EltTy = RegTy.getElementType();
  LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
  unsigned RegNumElts = RegTy.getNumElements();
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;
  // Perfect split without leftover
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
                        MRI);
  // Irregular split. Unmerge down to individual elements so the artifact
  // combiner has direct access to all of them, then build vectors with
  // NumElts elements each. The remaining element(s) become the leftover.
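  // For example (hypothetical types): extracting 2-element pieces from a
  // <5 x s32> unmerges to five s32 elements, builds two <2 x s32>
  // sub-vectors, and keeps the fifth element as the single leftover.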
  SmallVector<Register, 8> Elts;
  extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);
  unsigned Offset = 0;
  // Requested sub-vectors of NarrowTy.
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
    ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
    VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
  }
  // Leftover element(s).
  if (LeftoverNumElts == 1) {
    VRegs.push_back(Elts[Offset]);
  } else {
    LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
    ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
    VRegs.push_back(
        MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
  }
}
MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
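// For example, folding TargetOpcode::G_ADD over two G_CONSTANT vregs holding
// 2 and 3 yields APInt(5); the division and remainder cases below refuse to
// fold when the divisor is zero.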
std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
                                             const Register Op1,
                                             const Register Op2,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return std::nullopt;
  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return std::nullopt;
  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_PTR_ADD:
    // Types can be of different width here.
    // Result needs to be the same width as C1, so trunc or sext C2.
    return C1 + C2.sextOrTrunc(C1.getBitWidth());
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }
  return std::nullopt;
}
std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;
  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;
  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }
  return std::nullopt;
}
SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();
  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();
  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;
  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;
  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }
  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }
  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;
    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }
  if (SNaN) {
    // These FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }
  return false;
}
Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }
  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }
  return Align(1);
}
Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }
    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted because they were or became dead. If this
    // happens, reinsert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }
  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
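// For example, folding G_SEXT_INREG with Imm = 8 over an s32 G_CONSTANT of
// 0xFF truncates to the 8-bit value -1 and sign-extends back to 32 bits,
// yielding all-ones.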
std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
                                             const Register Op1, uint64_t Imm,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return std::nullopt;
}
std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                              const Register Op0,
                                              const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
  if (!Val)
    return Val;
  const unsigned DstSize = DstTy.getScalarSizeInBits();
  switch (Opcode) {
  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    // TODO: DAG considers target preference when constant folding any_extend.
    return Val->zext(DstSize);
  default:
    break;
  }
  llvm_unreachable("unexpected cast opcode to constant fold");
}
std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return std::nullopt;
}
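// The callback maps each constant element to its folded count; a caller
// folding G_CTLZ might pass a lambda such as
//   [](APInt V) { return V.countl_zero(); }
// (an illustrative callback, not a reference to a specific in-tree caller).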
std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                             std::function<unsigned(APInt)> CB) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return std::nullopt;
    return CB(*MaybeCst);
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return std::nullopt;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return std::nullopt;
}
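// Each folded compare below produces a 1-bit APInt (0 or 1); for vector
// operands, one result is produced per element of the two G_BUILD_VECTOR
// sources.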
std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                       const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Op1);
  if (Ty != MRI.getType(Op2))
    return std::nullopt;
  auto TryFoldScalar = [&MRI, Pred](Register LHS,
                                    Register RHS) -> std::optional<APInt> {
    auto LHSCst = getIConstantVRegVal(LHS, MRI);
    auto RHSCst = getIConstantVRegVal(RHS, MRI);
    if (!LHSCst || !RHSCst)
      return std::nullopt;
    switch (Pred) {
    case CmpInst::Predicate::ICMP_EQ:
      return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
    case CmpInst::Predicate::ICMP_NE:
      return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
    case CmpInst::Predicate::ICMP_UGT:
      return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
    case CmpInst::Predicate::ICMP_UGE:
      return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
    case CmpInst::Predicate::ICMP_ULT:
      return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
    case CmpInst::Predicate::ICMP_ULE:
      return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
    case CmpInst::Predicate::ICMP_SGT:
      return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
    case CmpInst::Predicate::ICMP_SGE:
      return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
    case CmpInst::Predicate::ICMP_SLT:
      return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
    case CmpInst::Predicate::ICMP_SLE:
      return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
    default:
      return std::nullopt;
    }
  };
  SmallVector<APInt> FoldedICmps;
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
    auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    if (!BV1 || !BV2)
      return std::nullopt;
    assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
    for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
      if (auto MaybeFold =
              TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
        FoldedICmps.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedICmps;
  }
  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
    FoldedICmps.emplace_back(*MaybeCst);
    return FoldedICmps;
  }
  return std::nullopt;
}
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;
  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.
    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }
    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;
    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants, since otherwise we would need to know whether the
    // number of leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }
    return true;
  }
  default:
    break;
  }
  if (!KB)
    return false;
  // More could be done here, though the above checks are enough
  // to handle some common cases.
  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}
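// Worked examples, derived from the cases below: getLCMType(s32, s64) is s64,
// since the 64-bit LCM is TargetTy itself; getLCMType(<2 x s32>, <3 x s32>)
// is <6 x s32>, multiplying the element counts and dividing by their GCD.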
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;
  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    LLT TargetElt = TargetTy.getElementType();
    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getLCMType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getLCMType not implemented between fixed and scalable vectors.");
    if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
      int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
                                TargetTy.getElementCount().getKnownMinValue());
      // Prefer the original element type.
      ElementCount Mul = OrigTy.getElementCount().multiplyCoefficientBy(
          TargetTy.getElementCount().getKnownMinValue());
      return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
                         OrigTy.getElementType());
    }
    unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    return LLT::vector(
        ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
        OrigElt);
  }
  // One type is scalar, one type is vector
  if (OrigTy.isVector() || TargetTy.isVector()) {
    LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
    LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
    LLT EltTy = VecTy.getElementType();
    LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;
    // Prefer scalar type from OrigTy.
    if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
      return LLT::vector(VecTy.getElementCount(), OrigEltTy);
    // Different size scalars. Create vector with the same total size.
    // LCM will take fixed/scalable from VecTy.
    unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
                                VecTy.getElementCount().getKnownMinValue(),
                            ScalarTy.getSizeInBits().getFixedValue());
    // Prefer type from OrigTy
    return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
                                         VecTy.getElementCount().isScalable()),
                       OrigEltTy);
  }
  // At this point, both types are scalars of different size
  unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
                          TargetTy.getSizeInBits().getFixedValue());
  // Preserve pointer types.
  if (LCM == OrigTy.getSizeInBits())
    return OrigTy;
  if (LCM == TargetTy.getSizeInBits())
    return TargetTy;
  return LLT::scalar(LCM);
}
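// Worked example, derived from the logic below: getCoverTy(<6 x s32>,
// <4 x s32>) rounds the element count up to a multiple of 4 and returns
// <8 x s32>, the smallest type that both covers the original and splits
// evenly into <4 x s32> pieces.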
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
      (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
    llvm_unreachable(
        "getCoverTy not implemented between fixed and scalable vectors.");
  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);
  unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
  unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;
  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}
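/// Return a type whose size is the greatest common divisor of the sizes of
/// OrigTy and TargetTy, preferring OrigTy's (element) type where the GCD
/// allows it.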
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;
  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    // TODO: The docstring for this function says it is intended for building
    // MERGE/UNMERGE instructions, and we never generate a MERGE/UNMERGE
    // between fixed and scalable vector types. getGCDType between the two
    // could be implemented in the future if a need arises, but it is not
    // worth it now as this function should not be used in that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getGCDType not implemented between fixed and scalable vectors.");
    unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    if (GCD == OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 OrigElt);
    // Cannot produce original element type, but both have vscale in common.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 GCD);
    return LLT::vector(
        ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
                          OrigTy.isScalable()),
        OrigElt);
  }
  // If one type is a vector and its element size matches the other type's
  // scalar size, then the GCD type is the scalar type.
  if (OrigTy.isVector() &&
      OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy.getElementType();
  if (TargetTy.isVector() &&
      TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
    return OrigTy;
  // At this point, both types are either scalars of different sizes or one is
  // a vector and one is a scalar. If both types are scalars, the GCD type is
  // the GCD between the two scalar sizes. If one is a vector and one is a
  // scalar, then the GCD type is the GCD between the scalar and the vector
  // element size.
  LLT OrigScalar = OrigTy.getScalarType();
  LLT TargetScalar = TargetTy.getScalarType();
  unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
                          TargetScalar.getSizeInBits().getFixedValue());
  return LLT::scalar(GCD);
}
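/// If \p MI is a G_SHUFFLE_VECTOR whose mask reads every element from the
/// same source lane (ignoring undef lanes), return that lane; an all-undef
/// mask is treated as a splat of lane 0.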
std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 to give callers the best chance to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;
  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;
  return SplatValue;
}
static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}
namespace {
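/// Look through a G_BUILD_VECTOR(_TRUNC) or a (possibly nested)
/// G_CONCAT_VECTORS defining \p VReg and return the common constant if every
/// element is the same constant, optionally skipping undef elements.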
std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return std::nullopt;
  bool IsConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  if (!isBuildVectorOp(MI->getOpcode()) && !IsConcatVectorsOp)
    return std::nullopt;
  std::optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    // If we have a G_CONCAT_VECTOR, we recursively look into the
    // vectors that we're concatenating to see if they're splats.
    auto ElementValAndReg =
        IsConcatVectorsOp
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
            : getAnyConstantVRegValWithLookThrough(Element, MRI,
                                                   /*LookThroughInstrs=*/true,
                                                   /*LookThroughAnyExt=*/true);
    // If AllowUndef, treat an undef element as compatible with a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return std::nullopt;
    }
    // Record splat value
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;
    // Different constant than the one already recorded, not a constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return std::nullopt;
  }
  return SplatValAndReg;
}
} // end anonymous namespace
bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}
bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}
std::optional<APInt>
llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /*AllowUndef=*/false)) {
    if (std::optional<ValueAndVReg> ValAndVReg =
            getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
      return ValAndVReg->Value;
  }
  return std::nullopt;
}
std::optional<APInt>
llvm::getIConstantSplatVal(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
}
std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const Register Reg,
                               const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /*AllowUndef=*/false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}
std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) {
  return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
}
std::optional<FPValueAndVReg>
llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
                        bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}
bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}
bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}
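/// If \p MI is a build vector, return its splat as either a sign-extended
/// constant or the register all of its operands share; std::nullopt
/// otherwise.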
std::optional<RegOrConstant>
llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return std::nullopt;
  if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(drop_begin(MI.operands(), 2),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return std::nullopt;
  return RegOrConstant(Reg);
}
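/// Return true if \p MI defines a constant scalar: G_CONSTANT or
/// G_IMPLICIT_DEF always, G_FCONSTANT if \p AllowFP, and opaque constants
/// such as globals, frame indices, block addresses and jump tables if
/// \p AllowOpaqueConstants.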
static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}
bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}
bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;
  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }
  return true;
}
std::optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
  if (!MaybeCst)
    return std::nullopt;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, /*IsSigned=*/true);
}
std::optional<APFloat>
llvm::isConstantOrConstantSplatVectorFP(MachineInstr &MI,
                                        const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
    return FpConst->Value;
  auto MaybeCstFP = getFConstantSplat(Def, MRI, /*AllowUndef=*/false);
  if (!MaybeCstFP)
    return std::nullopt;
  return MaybeCstFP->Value;
}
bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}
bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}
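/// Test \p Match against the constant defining \p Reg: the immediate of a
/// G_CONSTANT, or each element of a G_BUILD_VECTOR of constants. With
/// \p AllowUndefs, undef values and undef elements are passed to \p Match as
/// null.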
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);
  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());
  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;
  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }
    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }
  return true;
}
bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
                           bool IsVector, bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return ~Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == 0;
  }
  llvm_unreachable("Invalid boolean contents");
}
int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
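/// Erase \p MI, queueing the defining instructions of its virtual register
/// uses in \p DeadInstChain so callers can check whether they became dead.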
void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}
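/// Erase every instruction in \p DeadInstrs, then keep erasing instructions
/// that become trivially dead as a result.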
void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}
void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}
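/// For each register defined by \p MI, collect its DBG_VALUE users and hand
/// them to salvageDebugInfoForDbgValue so their locations can be rewritten.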
void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");
    SmallVector<MachineOperand *, 16> DbgUsers;
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
      MachineInstr *DbgValue = MOUse.getParent();
      // Ignore partially formed DBG_VALUEs.
      if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
        DbgUsers.push_back(&MOUse);
      }
    }
    if (!DbgUsers.empty()) {
      salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
    }
  }
}
bool llvm::isPreISelGenericFloatingPointOpcode(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_FABS:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FCANONICALIZE:
  case TargetOpcode::G_FCEIL:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FCOPYSIGN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FEXP2:
  case TargetOpcode::G_FEXP:
  case TargetOpcode::G_FFLOOR:
  case TargetOpcode::G_FLOG10:
  case TargetOpcode::G_FLOG2:
  case TargetOpcode::G_FLOG:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FNEARBYINT:
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPOW:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FSQRT:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_INTRINSIC_ROUND:
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return true;
  default:
    return false;
  }
}
/// Shifts return poison if the shift amount is larger than the bitwidth.
static bool shiftAmountKnownInRange(Register ShiftAmount,
                                    const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(ShiftAmount);
  if (Ty.isScalableVector())
    return false; // Can't tell, just return false to be safe
  if (Ty.isScalar()) {
    std::optional<ValueAndVReg> Val =
        getIConstantVRegValWithLookThrough(ShiftAmount, MRI);
    if (!Val)
      return false;
    return Val->Value.ult(Ty.getScalarSizeInBits());
  }
  GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
  if (!BV)
    return false;
  unsigned Sources = BV->getNumSources();
  for (unsigned I = 0; I < Sources; ++I) {
    std::optional<ValueAndVReg> Val =
        getIConstantVRegValWithLookThrough(BV->getSourceReg(I), MRI);
    if (!Val)
      return false;
    if (!Val->Value.ult(Ty.getScalarSizeInBits()))
      return false;
  }
  return true;
}
namespace {
enum class UndefPoisonKind {
  PoisonOnly = (1 << 0),
  UndefOnly = (1 << 1),
  UndefOrPoison = PoisonOnly | UndefOnly,
};
} // end anonymous namespace
static bool includesPoison(UndefPoisonKind Kind) {
  return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
}
static bool includesUndef(UndefPoisonKind Kind) {
  return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
}
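/// Return true if the instruction defining \p Reg may produce a value that is
/// undef or poison (as selected by \p Kind) from well-defined inputs,
/// optionally considering poison-generating flags.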
static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
                                   bool ConsiderFlagsAndMetadata,
                                   UndefPoisonKind Kind) {
  MachineInstr *RegDef = MRI.getVRegDef(Reg);
  if (ConsiderFlagsAndMetadata && includesPoison(Kind))
    if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
      if (GMI->hasPoisonGeneratingFlags())
        return true;
  // Check whether opcode is a poison/undef-generating operation.
  switch (RegDef->getOpcode()) {
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
    return false;
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return includesPoison(Kind) &&
           !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
    // fptosi/ui yields poison if the resulting value does not fit in the
    // destination type.
    return true;
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_ABS:
  case TargetOpcode::G_CTPOP:
  case TargetOpcode::G_BSWAP:
  case TargetOpcode::G_BITREVERSE:
  case TargetOpcode::G_FSHL:
  case TargetOpcode::G_FSHR:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_PTRMASK:
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
  case TargetOpcode::G_UADDO:
  case TargetOpcode::G_USUBO:
  case TargetOpcode::G_SMULO:
  case TargetOpcode::G_UMULO:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SSUBSAT:
  case TargetOpcode::G_USUBSAT:
    return false;
  case TargetOpcode::G_SSHLSAT:
  case TargetOpcode::G_USHLSAT:
    return includesPoison(Kind) &&
           !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    GInsertVectorElement *Insert = cast<GInsertVectorElement>(RegDef);
    if (includesPoison(Kind)) {
      std::optional<ValueAndVReg> Index =
          getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
      if (!Index)
        return true;
      LLT VecTy = MRI.getType(Insert->getVectorReg());
      return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
    }
    return false;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    GExtractVectorElement *Extract = cast<GExtractVectorElement>(RegDef);
    if (includesPoison(Kind)) {
      std::optional<ValueAndVReg> Index =
          getIConstantVRegValWithLookThrough(Extract->getIndexReg(), MRI);
      if (!Index)
        return true;
      LLT VecTy = MRI.getType(Extract->getVectorReg());
      return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
    }
    return false;
  }
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
    ArrayRef<int> Mask = Shuffle->getMask();
    return includesPoison(Kind) && is_contained(Mask, -1);
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_PHI:
  case TargetOpcode::G_SELECT:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_FREEZE:
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_PTR_ADD:
    return false;
  default:
    return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
  }
}
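/// Return true if \p Reg is guaranteed to be neither undef nor poison (as
/// selected by \p Kind), recursing through its defining instructions up to
/// MaxAnalysisRecursionDepth.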
static bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
                                             const MachineRegisterInfo &MRI,
                                             unsigned Depth,
                                             UndefPoisonKind Kind) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return false;
  MachineInstr *RegDef = MRI.getVRegDef(Reg);
  switch (RegDef->getOpcode()) {
  case TargetOpcode::G_FREEZE:
    return true;
  case TargetOpcode::G_IMPLICIT_DEF:
    return !includesUndef(Kind);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_BUILD_VECTOR: {
    GBuildVector *BV = cast<GBuildVector>(RegDef);
    unsigned NumSources = BV->getNumSources();
    for (unsigned I = 0; I < NumSources; ++I)
      if (!::isGuaranteedNotToBeUndefOrPoison(BV->getSourceReg(I), MRI,
                                              Depth + 1, Kind))
        return false;
    return true;
  }
  case TargetOpcode::G_PHI: {
    GPhi *Phi = cast<GPhi>(RegDef);
    unsigned NumIncoming = Phi->getNumIncomingValues();
    for (unsigned I = 0; I < NumIncoming; ++I)
      if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
                                              Depth + 1, Kind))
        return false;
    return true;
  }
  default: {
    auto MOCheck = [&](const MachineOperand &MO) {
      if (!MO.isReg())
        return true;
      return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
                                                Kind);
    };
    return !::canCreateUndefOrPoison(Reg, MRI,
                                     /*ConsiderFlagsAndMetadata=*/true, Kind) &&
           all_of(RegDef->uses(), MOCheck);
  }
  }
}
bool llvm::canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
                                  bool ConsiderFlagsAndMetadata) {
  return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
                                  UndefPoisonKind::UndefOrPoison);
}
bool llvm::canCreatePoison(Register Reg, const MachineRegisterInfo &MRI,
                           bool ConsiderFlagsAndMetadata) {
  return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
                                  UndefPoisonKind::PoisonOnly);
}
bool llvm::isGuaranteedNotToBeUndefOrPoison(Register Reg,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
                                            UndefPoisonKind::UndefOrPoison);
}
bool llvm::isGuaranteedNotToBePoison(Register Reg,
                                     const MachineRegisterInfo &MRI,
                                     unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
                                            UndefPoisonKind::PoisonOnly);
}
bool llvm::isGuaranteedNotToBeUndef(Register Reg,
                                    const MachineRegisterInfo &MRI,
                                    unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
                                            UndefPoisonKind::UndefOnly);
}
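/// Translate \p Ty into the corresponding IR type, using integer types of the
/// matching bit width for the scalar case and for vector elements.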
Type *llvm::getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                           Ty.getElementCount());
  return IntegerType::get(C, Ty.getSizeInBits());
}
APInt llvm::GIConstant::getScalarValue() const {
  assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
  return Value;
}
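/// Try to interpret \p Const as an integer constant: a plain scalar, a
/// G_BUILD_VECTOR of constants, or a G_SPLAT_VECTOR of a constant scalar
/// (a scalable-vector splat).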
std::optional<GIConstant>
llvm::GIConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
  MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
  if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
    std::optional<ValueAndVReg> MayBeConstant =
        getIConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
    if (!MayBeConstant)
      return std::nullopt;
    return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
  }
  if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
    SmallVector<APInt> Values;
    unsigned NumSources = Build->getNumSources();
    for (unsigned I = 0; I < NumSources; ++I) {
      Register SrcReg = Build->getSourceReg(I);
      std::optional<ValueAndVReg> MayBeConstant =
          getIConstantVRegValWithLookThrough(SrcReg, MRI);
      if (!MayBeConstant)
        return std::nullopt;
      Values.push_back(MayBeConstant->Value);
    }
    return GIConstant(Values);
  }
  std::optional<ValueAndVReg> MayBeConstant =
      getIConstantVRegValWithLookThrough(Const, MRI);
  if (!MayBeConstant)
    return std::nullopt;
  return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
}
APFloat llvm::GFConstant::getScalarValue() const {
  assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
  return Values[0];
}
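/// Floating-point counterpart of GIConstant::getConstant: recognizes scalar,
/// G_BUILD_VECTOR, and G_SPLAT_VECTOR floating-point constants.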
std::optional<GFConstant>
llvm::GFConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
  MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
  if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
    std::optional<FPValueAndVReg> MayBeConstant =
        getFConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
    if (!MayBeConstant)
      return std::nullopt;
    return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
  }
  if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
    SmallVector<APFloat> Values;
    unsigned NumSources = Build->getNumSources();
    for (unsigned I = 0; I < NumSources; ++I) {
      Register SrcReg = Build->getSourceReg(I);
      std::optional<FPValueAndVReg> MayBeConstant =
          getFConstantVRegValWithLookThrough(SrcReg, MRI);
      if (!MayBeConstant)
        return std::nullopt;
      Values.push_back(MayBeConstant->Value);
    }
    return GFConstant(Values);
  }
  std::optional<FPValueAndVReg> MayBeConstant =
      getFConstantVRegValWithLookThrough(Const, MRI);
  if (!MayBeConstant)
    return std::nullopt;
  return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
}