//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
///
//===----------------------------------------------------------------------===//
include "RISCVInstrFormatsV.td"
//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}
class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
  let OperandType = "OPERAND_VTYPEI" # VTypeINum;
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<VTypeINum>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}
def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
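// For illustration (per the V spec 1.0 vtype layout: vlmul in bits [2:0],
// vsew in bits [5:3], vta in bit 6, vma in bit 7), the operand for
// "e32, m2, ta, ma" encodes as (1<<7)|(1<<6)|(0b010<<3)|0b001 = 0xd1.
// VTypeIOp11 is used by vsetvli (11-bit zimm) and VTypeIOp10 by vsetivli
// (10-bit zimm).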
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
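// In assembly the mask operand is optional trailing syntax: for example,
// "vadd.vv v1, v2, v3, v0.t" executes under mask v0, while omitting it
// yields an unmasked instruction (vm=1 in the encoding).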
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let OperandType = "OPERAND_SIMM5";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}
def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let OperandType = "OPERAND_SIMM5_PLUS1";
  let OperandNamespace = "RISCVOp";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}
def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
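// simm5_plus1 covers [-15, 16]: the assembler lowers "vmsge{u}.vi vd, vs2, imm"
// to "vmsgt{u}.vi vd, vs2, imm-1", so imm-1 must still fit in simm5 [-16, 15].
// (The unsigned imm == 0 case is handled separately; see the vmsgeu pseudos
// below.)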
//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//
class VMVRSched<int n> : Sched<[
  !cast<SchedReadWrite>("WriteVMov" #n #"V"),
  !cast<SchedReadWrite>("ReadVMov" #n #"V")
]>;
class VLESched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDE_" #lmul),
  ReadVLDX, ReadVMask
]>;
class VSESched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSTE_" #lmul),
  !cast<SchedReadWrite>("ReadVSTEV_" #lmul),
  ReadVSTX, ReadVMask
]>;
class VLSSched<int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDS" #eew #"_" #emul),
  ReadVLDX, ReadVLDSX, ReadVMask
]>;
class VSSSched<int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSTS" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V_" #emul),
  ReadVSTX, ReadVSTSX, ReadVMask
]>;
class VLXSched<int dataEEW, string isOrdered,
               string dataEMUL = "WorstCase",
               string idxEMUL = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLD" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  ReadVLDX,
  !cast<SchedReadWrite>("ReadVLD" #isOrdered #"XV_" #idxEMUL), ReadVMask
]>;
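// As an example of how the names concatenate: VLXSched<32, "O", "M1", "M2">
// resolves to Sched<[WriteVLDOX32_M1, ReadVLDX, ReadVLDOXV_M2, ReadVMask]>.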
class VSXSched<int dataEEW, string isOrdered,
               string dataEMUL = "WorstCase",
               string idxEMUL = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  !cast<SchedReadWrite>("ReadVST" #isOrdered #"X" #dataEEW #"_" #dataEMUL),
  ReadVSTX, !cast<SchedReadWrite>("ReadVST" #isOrdered #"XV_" #idxEMUL), ReadVMask
]>;
class VLFSched<string lmul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLDFF_" #lmul),
  ReadVLDX, ReadVMask
]>;
// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVMask
]>;
class VSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTEV_" #emul),
  ReadVSTX, ReadVMask
]>;
class VLSEGFFSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVMask
]>;
// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, ReadVLDSX, ReadVMask
]>;
class VSSSEGSched<int nf, int eew, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V_" #emul),
  ReadVSTX, ReadVSTSX, ReadVMask
]>;
// Indexed Segment Loads and Stores
class VLXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVL" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
  ReadVLDX, !cast<SchedReadWrite>("ReadVLD" #isOrdered #"XV_" #emul), ReadVMask
]>;
class VSXSEGSched<int nf, int eew, string isOrdered, string emul = "WorstCase"> : Sched<[
  !cast<SchedReadWrite>("WriteVS" #isOrdered #"XSEG" #nf #"e" #eew #"_" #emul),
  !cast<SchedReadWrite>("ReadVST" #isOrdered #"X" #eew #"_" #emul),
  ReadVSTX, !cast<SchedReadWrite>("ReadVST" #isOrdered #"XV_" #emul), ReadVMask
]>;
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPRMemZeroOffset:$rs1),
                opcodestr, "$vd, $rs1"> {
  let Uses = [];
}
// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1), opcodestr, "$vd, $rs1">;
} // vm = 1, RVVConstraint = NoConstraint
// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;
// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;
// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;
// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;
let vm = 1 in {
// unit-stride whole register store vs<nf>r.v vs3, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPRMemZeroOffset:$rs1),
                opcodestr, "$vs3, $rs1"> {
  let Uses = [];
}
// unit-stride mask store vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1), opcodestr,
                "$vs3, $rs1">;
} // vm = 1
// strided store vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;
// indexed store vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;
// unit-stride segment store vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;
// strided segment store vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;
// indexed segment store vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;
// op vd, vs2, vs1, v0 (no mask; v0 is used as the carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}
// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;
// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}
// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;
// op vd, vs2, rs1, v0 (no mask; v0 is used as the carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}
// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;
// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}
// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;
// op vd, vs2, imm, v0 (no mask; v0 is used as the carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}
// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}
// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;
// op vd, rs1, vs2, vm (Float) (reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;
// op vd, vs2, vm (the vs1 field encodes the operation)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
// op vd, vs2 (the vs1 field encodes the operation)
class VALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2), opcodestr,
              "$vd, $vs2"> {
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);
    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U">;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O">;
    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U">;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O">;
  }
}
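// For example, the EEW=8 iteration above defines VLUXEI8_V ("vluxei8.v"),
// VLOXEI8_V ("vloxei8.v"), VSUXEI8_V ("vsuxei8.v") and VSOXEI8_V
// ("vsoxei8.v"), using the unordered/ordered indexed scheduling classes.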
multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVIALUV_WorstCase, ReadVIALUV_WorstCase,
                  ReadVIALUV_WorstCase, ReadVMask]>;
}
multiclass VALU_IV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVIALUX_WorstCase, ReadVIALUV_WorstCase,
                  ReadVIALUX_WorstCase, ReadVMask]>;
}
multiclass VALU_IV_I<string opcodestr, bits<6> funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVIALUI_WorstCase, ReadVIALUV_WorstCase,
                  ReadVMask]>;
}
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;
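// For example, "defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>" (used below)
// produces VADD_VV, VADD_VX and VADD_VI, matching "vadd.vv", "vadd.vx" and
// "vadd.vi".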
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>;
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWALUV_WorstCase, ReadVIWALUV_WorstCase,
                  ReadVIWALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWALUX_WorstCase, ReadVIWALUV_WorstCase,
                  ReadVIWALUX_WorstCase, ReadVMask]>;
}
multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">,
          Sched<[WriteVIMulAddV_WorstCase, ReadVIMulAddV_WorstCase,
                 ReadVIMulAddV_WorstCase, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          Sched<[WriteVIMulAddX_WorstCase, ReadVIMulAddV_WorstCase,
                 ReadVIMulAddX_WorstCase, ReadVMask]>;
}
multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          Sched<[WriteVIWMulAddX_WorstCase, ReadVIWMulAddV_WorstCase,
                 ReadVIWMulAddX_WorstCase, ReadVMask]>;
}
multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6>
   : VWMAC_MV_X<opcodestr, funct6> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">,
          Sched<[WriteVIWMulAddV_WorstCase, ReadVIWMulAddV_WorstCase,
                 ReadVIWMulAddV_WorstCase, ReadVMask]>;
}
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV_WorstCase, ReadVExtV_WorstCase, ReadVMask]>;
}
multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVIMergeV_WorstCase, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVIMergeX_WorstCase, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI_WorstCase, ReadVIMergeV_WorstCase,
                  ReadVMask]>;
}
multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV_WorstCase, ReadVICALUV_WorstCase,
                  ReadVICALUV_WorstCase, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX_WorstCase, ReadVICALUV_WorstCase,
                  ReadVICALUX_WorstCase, ReadVMask]>;
}
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALUm_IV_V_X<opcodestr, funct6> {
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI_WorstCase, ReadVICALUV_WorstCase,
                  ReadVMask]>;
}
multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV_WorstCase, ReadVICALUV_WorstCase,
                 ReadVICALUV_WorstCase]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX_WorstCase, ReadVICALUV_WorstCase,
                 ReadVICALUX_WorstCase]>;
}
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6>
   : VALUNoVm_IV_V_X<opcodestr, funct6> {
  def I : VALUVINoVm<funct6, opcodestr # ".vi", simm5>,
          Sched<[WriteVICALUI_WorstCase, ReadVICALUV_WorstCase]>;
}
multiclass VALU_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFALUF_WorstCase, ReadVFALUV_WorstCase,
                 ReadVFALUF_WorstCase, ReadVMask]>;
}
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6>
    : VALU_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFALUV_WorstCase, ReadVFALUV_WorstCase,
                 ReadVFALUV_WorstCase, ReadVMask]>;
}
multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV_WorstCase, ReadVFWALUV_WorstCase,
                 ReadVFWALUV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF_WorstCase, ReadVFWALUV_WorstCase,
                 ReadVFWALUF_WorstCase, ReadVMask]>;
}
multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMulV_WorstCase, ReadVFMulV_WorstCase,
                 ReadVFMulV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMulF_WorstCase, ReadVFMulV_WorstCase,
                 ReadVFMulF_WorstCase, ReadVMask]>;
}
multiclass VDIV_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFDivF_WorstCase, ReadVFDivV_WorstCase,
                 ReadVFDivF_WorstCase, ReadVMask]>;
}
multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6>
    : VDIV_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFDivV_WorstCase, ReadVFDivV_WorstCase,
                 ReadVFDivV_WorstCase, ReadVMask]>;
}
multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFWMulV_WorstCase, ReadVFWMulV_WorstCase,
                 ReadVFWMulV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFWMulF_WorstCase, ReadVFWMulV_WorstCase,
                 ReadVFWMulF_WorstCase, ReadVMask]>;
}
multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMulAddV_WorstCase, ReadVFMulAddV_WorstCase,
                 ReadVFMulAddV_WorstCase, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMulAddF_WorstCase, ReadVFMulAddV_WorstCase,
                 ReadVFMulAddF_WorstCase, ReadVMask]>;
}
multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFWMulAddV_WorstCase, ReadVFWMulAddV_WorstCase,
                 ReadVFWMulAddV_WorstCase, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFWMulAddF_WorstCase, ReadVFWMulAddV_WorstCase,
                 ReadVFWMulAddF_WorstCase, ReadVMask]>;
}
multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV_WorstCase, ReadVFSqrtV_WorstCase,
                  ReadVMask]>;
}
multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV_WorstCase, ReadVFRecpV_WorstCase,
                  ReadVMask]>;
}
multiclass VMINMAX_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFMinMaxV_WorstCase, ReadVFMinMaxV_WorstCase,
                 ReadVFMinMaxV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFMinMaxF_WorstCase, ReadVFMinMaxV_WorstCase,
                 ReadVFMinMaxF_WorstCase, ReadVMask]>;
}
multiclass VCMP_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFCmpF_WorstCase, ReadVFCmpV_WorstCase,
                 ReadVFCmpF_WorstCase, ReadVMask]>;
}
multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6>
    : VCMP_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFCmpV_WorstCase, ReadVFCmpV_WorstCase,
                 ReadVFCmpV_WorstCase, ReadVMask]>;
}
multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          Sched<[WriteVFSgnjV_WorstCase, ReadVFSgnjV_WorstCase,
                 ReadVFSgnjV_WorstCase, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFSgnjF_WorstCase, ReadVFSgnjV_WorstCase,
                 ReadVFSgnjF_WorstCase, ReadVMask]>;
}
multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV_WorstCase, ReadVFClassV_WorstCase,
                  ReadVMask]>;
}
multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV_WorstCase, ReadVFCvtIToFV_WorstCase,
                  ReadVMask]>;
}
multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV_WorstCase, ReadVFCvtFToIV_WorstCase,
                  ReadVMask]>;
}
multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV_WorstCase, ReadVFWCvtIToFV_WorstCase,
                  ReadVMask]>;
}
multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV_WorstCase, ReadVFWCvtFToIV_WorstCase,
                  ReadVMask]>;
}
multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV_WorstCase, ReadVFWCvtFToFV_WorstCase,
                  ReadVMask]>;
}
multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV_WorstCase, ReadVFNCvtIToFV_WorstCase,
                  ReadVMask]>;
}
multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV_WorstCase, ReadVFNCvtFToIV_WorstCase,
                  ReadVMask]>;
}
multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV_WorstCase, ReadVFNCvtFToFV_WorstCase,
                  ReadVMask]>;
}
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV_From_WorstCase, ReadVIRedV, ReadVIRedV0,
                   ReadVMask]>;
}
multiclass VREDMINMAX_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedMinMaxV_From_WorstCase, ReadVIRedV, ReadVIRedV0,
                   ReadVMask]>;
}
multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV_From_WorstCase, ReadVIWRedV, ReadVIWRedV0,
                   ReadVMask]>;
}
multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV_From_WorstCase, ReadVFRedV, ReadVFRedV0,
                   ReadVMask]>;
}
multiclass VREDMINMAX_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedMinMaxV_From_WorstCase, ReadVFRedV, ReadVFRedV0,
                   ReadVMask]>;
}
multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV_From_WorstCase, ReadVFRedOV, ReadVFRedOV0,
                   ReadVMask]>;
}
multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV_From_WorstCase, ReadVFWRedV, ReadVFWRedV0,
                   ReadVMask]>;
}
multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV_From_WorstCase, ReadVFWRedOV, ReadVFWRedOV0,
                   ReadVMask]>;
}
multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">,
          Sched<[WriteVMALUV_WorstCase, ReadVMALUV_WorstCase,
                 ReadVMALUV_WorstCase]>;
}
multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV_WorstCase, ReadVMSFSV_WorstCase, ReadVMask]>;
}
multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV_WorstCase, ReadVMIotV_WorstCase, ReadVMask]>;
}
multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVShiftV_WorstCase, ReadVShiftV_WorstCase,
                  ReadVShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVShiftX_WorstCase, ReadVShiftV_WorstCase,
                  ReadVShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVShiftI_WorstCase, ReadVShiftV_WorstCase,
                  ReadVMask]>;
}
multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
           Sched<[WriteVNShiftV_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVNShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
           Sched<[WriteVNShiftX_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVNShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".wi", uimm5>,
           Sched<[WriteVNShiftI_WorstCase, ReadVNShiftV_WorstCase,
                  ReadVMask]>;
}
multiclass VMINMAX_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVIMinMaxV_WorstCase, ReadVIMinMaxV_WorstCase,
                  ReadVIMinMaxV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVIMinMaxX_WorstCase, ReadVIMinMaxV_WorstCase,
                  ReadVIMinMaxX_WorstCase, ReadVMask]>;
}
multiclass VCMP_IV_V<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVICmpV_WorstCase, ReadVICmpV_WorstCase,
                  ReadVICmpV_WorstCase, ReadVMask]>;
}
multiclass VCMP_IV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVICmpX_WorstCase, ReadVICmpV_WorstCase,
                  ReadVICmpX_WorstCase, ReadVMask]>;
}
multiclass VCMP_IV_I<string opcodestr, bits<6> funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVICmpI_WorstCase, ReadVICmpV_WorstCase,
                  ReadVMask]>;
}
multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;
multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;
multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>;
multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIMulV_WorstCase, ReadVIMulV_WorstCase,
                  ReadVIMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIMulX_WorstCase, ReadVIMulV_WorstCase,
                  ReadVIMulX_WorstCase, ReadVMask]>;
}
multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIWMulV_WorstCase, ReadVIWMulV_WorstCase,
                  ReadVIWMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIWMulX_WorstCase, ReadVIWMulV_WorstCase,
                  ReadVIWMulX_WorstCase, ReadVMask]>;
}
multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVIDivV_WorstCase, ReadVIDivV_WorstCase,
                  ReadVIDivV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVIDivX_WorstCase, ReadVIDivV_WorstCase,
                  ReadVIDivX_WorstCase, ReadVMask]>;
}
multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSALUV_WorstCase, ReadVSALUV_WorstCase,
                  ReadVSALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSALUX_WorstCase, ReadVSALUV_WorstCase,
                  ReadVSALUX_WorstCase, ReadVMask]>;
}
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VSALU_IV_V_X<opcodestr, funct6> {
  def I  : VALUVI<funct6, opcodestr # ".vi", simm5>,
           Sched<[WriteVSALUI_WorstCase, ReadVSALUV_WorstCase,
                  ReadVMask]>;
}
multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
           Sched<[WriteVAALUV_WorstCase, ReadVAALUV_WorstCase,
                  ReadVAALUV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVAALUX_WorstCase, ReadVAALUV_WorstCase,
                  ReadVAALUX_WorstCase, ReadVMask]>;
}
multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSMulV_WorstCase, ReadVSMulV_WorstCase,
                  ReadVSMulV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSMulX_WorstCase, ReadVSMulV_WorstCase,
                  ReadVSMulX_WorstCase, ReadVMask]>;
}
multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVSShiftV_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVSShiftV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVSShiftX_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVSShiftX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVSShiftI_WorstCase, ReadVSShiftV_WorstCase,
                  ReadVMask]>;
}
multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
           Sched<[WriteVNClipV_WorstCase, ReadVNClipV_WorstCase,
                  ReadVNClipV_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
           Sched<[WriteVNClipX_WorstCase, ReadVNClipV_WorstCase,
                  ReadVNClipX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".wi", uimm5>,
           Sched<[WriteVNClipI_WorstCase, ReadVNClipV_WorstCase,
                  ReadVMask]>;
}
multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVISlideX_WorstCase, ReadVISlideV_WorstCase,
                  ReadVISlideX_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVISlideI_WorstCase, ReadVISlideV_WorstCase,
                  ReadVMask]>;
}
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
           Sched<[WriteVISlide1X_WorstCase, ReadVISlideV_WorstCase,
                  ReadVISlideX_WorstCase, ReadVMask]>;
}
multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          Sched<[WriteVFSlide1F_WorstCase, ReadVFSlideV_WorstCase,
                 ReadVFSlideF_WorstCase, ReadVMask]>;
}
multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
           Sched<[WriteVRGatherVV_WorstCase, ReadVRGatherVV_data_WorstCase,
                  ReadVRGatherVV_index_WorstCase, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
           Sched<[WriteVRGatherVX_WorstCase, ReadVRGatherVX_data_WorstCase,
                  ReadVRGatherVX_index_WorstCase, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # ".vi", uimm5>,
           Sched<[WriteVRGatherVI_WorstCase, ReadVRGatherVI_data_WorstCase,
                  ReadVMask]>;
}
multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M  : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
           Sched<[WriteVCompressV_WorstCase, ReadVCompressV_WorstCase,
                  ReadVCompressV_WorstCase]>;
}
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");
    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX]>;
  }
}
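// For example, VWholeLoadN<0, "vl1r", VR> (an nf field of 0 encodes one
// register) defines VL1RE8_V, VL1RE16_V and VL1RE32_V ("vl1re8.v" etc.),
// all scheduled as WriteVLD1R.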
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX]>;
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">,
                           Sched<[WriteVSETVLI, ReadVSETVLI]>;
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">,
                             Sched<[WriteVSETIVLI]>;
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">,
                          Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
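// Assembly usage, for reference: "vsetvli rd, rs1, e32, m2, ta, ma" takes the
// requested AVL from rs1, sets vtype accordingly, and returns the new vl in
// rd; "vsetivli" takes a 5-bit immediate AVL, and "vsetvl" takes the vtype
// value from rs2.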
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);
  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched;
  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched;
  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched;
  // Vector Strided Instructions
  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;
}
defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM_WorstCase, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM_WorstCase, ReadVSTM_WorstCase, ReadVSTX]>;
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;
def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsI64] in {
// Vector Unit-Stride Instructions
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
              VLESched;
def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
                VLFSched;
def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
              VSESched;
// Vector Strided Instructions
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
               VLSSched<64>;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
               VSSSched<64>;
defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R>;
defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R>;
defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R>;
defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R>;
} // Predicates = [HasVInstructionsI64]
let Predicates = [IsRV64, HasVInstructionsI64] in {
  // Vector Indexed Instructions
  defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;
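// vneg.v is thus a pseudo for "vrsub.vx vd, vs, x0": each element becomes
// 0 - vs[i].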
// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000, "v">;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010, "v">;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001, "v">;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber on the following instructions so the destination cannot
// overlap the second (narrow) source operand or the mask operand. This has
// the downside that the earlyclobber constraint is too coarse and imposes
// unnecessary restrictions by also preventing the destination from
// overlapping the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
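// vwcvt{u}.x.x.v is thus a widening add of zero: each element is
// sign-extended (vwadd) or zero-extended (vwaddu) from SEW to 2*SEW bits.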
// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;
// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
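// vmadc/vmsbc produce a mask of per-element carry-outs/borrow-outs, e.g.
// "vmadc.vvm vd, vs2, vs1, v0" sets vd.mask[i] to the carry out of
// vs2[i] + vs1[i] + v0.mask[i]; together with vadc this implements
// multi-word addition.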
// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;
// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001>;
// Vector Narrowing Integer Right Shift Instructions
// Refer to Section 11.3 "Narrowing Vector Arithmetic Instructions".
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100>;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101>;
} // Constraints = "@earlyclobber $vd"
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons the 0 immediate must be special-cased: vmsgeu.vi
// with 0 is always true and vmsltu.vi with 0 is always false, and simply
// decrementing the immediate (as we do for the signed forms) would invert
// those semantics. To match the GNU assembler we emit vmseq.vv/vmsne.vv with
// the same register for both operands, which an InstAlias cannot express.
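// e.g. "vmsgeu.vi vd, va, 0" is always true and becomes "vmseq.vv vd, va, va";
// decrementing the immediate instead would yield "vmsgtu.vi vd, va, -1",
// which compares against the maximum unsigned value and is always false.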
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle the signed forms with pseudos as well, for consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
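// vmsge{u}.vx have no direct encoding, so the assembler expands the pseudos
// below (see RISCVAsmParser), roughly:
//   unmasked:          vmslt{u}.vx vd, va, x;       vmnand.mm vd, vd, vd
//   masked, vd != v0:  vmslt{u}.vx vd, va, x, v0.t; vmxor.mm  vd, vd, v0
//   masked, vd == v0:  vmslt{u}.vx vt, va, x;       vmandn.mm vd, vd, vt
// where vt is the scratch register carried by the _M_T variants.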
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
// Vector Integer Min/Max Instructions
defm VMINU_V : VMINMAX_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VMINMAX_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VMINMAX_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VMINMAX_IV_V_X<"vmax", 0b000111>;
// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;
// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;
// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;
// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;
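// vmerge selects between the two sources under the mask in v0:
//   vd[i] = v0.mask[i] ? (vs1[i] / x[rs1] / imm) : vs2[i]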
// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV_WorstCase, ReadVIMovV_WorstCase]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX_WorstCase, ReadVIMovX_WorstCase]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI_WorstCase]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Vector Single-Width Saturating Add and Subtract
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;
// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;
// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;
// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011>;
// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110>;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
}
// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd",
    Uses = [FRM],
    mayRaiseFPException = true in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000, "v">;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber on the following instructions so the destination cannot
// overlap the second (narrow) source operand or the mask operand. The
// downside is that earlyclobber is coarser than necessary: it also prevents
// the destination from overlapping the first (wide) source operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true
// Vector Single-Width Floating-Point Multiply/Divide Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VDIV_FV_F<"vfrdiv", 0b100001>;
}
// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
}
// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
// Vector Floating-Point Square-Root Instruction
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
// Vector Floating-Point Reciprocal Estimate Instruction
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
}
// Vector Floating-Point Reciprocal Square-Root Estimate Instruction
let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
// Vector Floating-Point MIN/MAX Instructions
let mayRaiseFPException = true in {
defm VFMIN_V : VMINMAX_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VMINMAX_FV_V_F<"vfmax", 0b000110>;
}
// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;
def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true
def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector Floating-Point Merge Instruction
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV_WorstCase, ReadVFMergeV_WorstCase,
                         ReadVFMergeF_WorstCase, ReadVMask]>;
// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV_WorstCase, ReadVFMovF_WorstCase]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Single-Width Floating-Point/Integer Type-Convert Instructions
let mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
}
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
let Uses = [FRM] in {
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
}
} // mayRaiseFPException = true
// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt,
    mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
}
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt
// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
}
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
let Uses = [FRM] in {
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
}
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd", mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Reduction Instructions
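// Reductions read the scalar accumulator from vs1[0] and write the scalar
// result to vd[0], e.g. "vredsum.vs vd, vs2, vs1" computes
//   vd[0] = vs1[0] + vs2[0] + ... + vs2[vl-1]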
let RVVConstraint = NoConstraint in {
defm VREDSUM  : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VREDMINMAX_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX  : VREDMINMAX_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VREDMINMAX_MV_V<"vredminu", 0b000100>;
defm VREDMIN  : VREDMINMAX_MV_V<"vredmin", 0b000101>;
defm VREDAND  : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR   : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR  : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint
// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber on the following instructions so the destination cannot
// overlap the vector source (vs2) or the mask operand. The downside is that
// earlyclobber is coarser than necessary: it also prevents the destination
// from overlapping the wide scalar source (vs1).
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
}
let mayRaiseFPException = true in {
defm VFREDMAX : VREDMINMAX_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VREDMINMAX_FV_V<"vfredmin", 0b000101>;
}
} // RVVConstraint = NoConstraint
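// vfredsum is the pre-1.0 spelling of vfredusum; keep it as an alias.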
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber on the following instructions so the destination cannot
// overlap the vector source (vs2) or the mask operand. The downside is that
// earlyclobber is coarser than necessary: it also prevents the destination
// from overlapping the wide scalar source (vs1).
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
}
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
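// vfwredsum is the pre-1.0 spelling of vfwredusum; keep it as an alias.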
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;
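// vmandnot.mm/vmornot.mm are the pre-1.0 spellings of vmandn.mm/vmorn.mm.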
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// Vector mask population count (vcpop.m)
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV_WorstCase, ReadVMPopV_WorstCase,
                     ReadVMask]>;
// vfirst: find-first-set mask bit (result is -1 if no mask bit is set)
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
              Sched<[WriteVMFFSV_WorstCase, ReadVMFFSV_WorstCase,
                     ReadVMask]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
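// vpopc.m is the pre-1.0 spelling of vcpop.m; keep it as an alias.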
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;
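// Example: with mask bits (e0..e7) = 0,0,1,0,1,0,0,0 (first set bit at e2):
//   vmsbf.m -> 1,1,0,0,0,0,0,0    vmsif.m -> 1,1,1,0,0,0,0,0
//   vmsof.m -> 0,0,1,0,0,0,0,0    viota.m -> 0,0,0,1,1,2,2,2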
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota
// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV_WorstCase, ReadVMask]>;
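// e.g. "vid.v v4" writes the element indices 0, 1, 2, ... to v4.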
// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                       (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
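// e.g. "vslideup.vi v4, v8, 2" writes v8[i] to v4[i+2], leaving v4[0..1]
// unchanged, while "vslidedown.vi v4, v8, 2" writes v8[i+2] to v4[i].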
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
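// vrgather.vv gathers vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]];
// vrgatherei16.vv does the same but always uses EEW=16 indices.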
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVRGatherVV_WorstCase, ReadVRGatherVV_data_WorstCase,
                             ReadVRGatherVV_index_WorstCase]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather
// Vector Compress Instruction
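// vcompress.vm vd, vs2, vs1 packs the elements of vs2 whose mask bit in vs1
// is set into contiguous elements starting at vd[0].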
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1,
    RVVConstraint = NoConstraint in {
// A future extension may relax the vector register alignment restrictions.
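// e.g. "vmv2r.v v2, v4" copies v4-v5 to v2-v3; both register numbers must be
// aligned to the group size, so "vmv2r.v v1, v4" is illegal.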
foreach n = [1, 2, 4, 8] in {
  defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  def VMV#n#R_V  : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                           (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                   VMVRSched<n> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {
  foreach nf=2-8 in {
    foreach eew = [8, 16, 32] in {
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);
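      // Vector Unit-strided Segment Instructions
      // e.g. "vlseg2e32.v v4, (a0)" de-interleaves two-field segments of
      // 32-bit elements: field 0 goes to v4 and field 1 to v5.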
      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
        VLSEGSched<nf, eew>;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
        VLSEGFFSched<nf, eew>;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
        VSSEGSched<nf, eew>;
      // Vector Strided Segment Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
        VLSSEGSched<nf, eew>;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
        VSSSEGSched<nf, eew>;
      // Vector Indexed Segment Instructions
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "U">;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">,
        VLXSEGSched<nf, eew, "O">;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "U">;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">,
        VSXSEGSched<nf, eew, "O">;
    }
  }
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsI64] in {
  foreach nf=2-8 in {
    // Vector Unit-strided Segment Instructions
    def VLSEG#nf#E64_V :
      VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
      VLSEGSched<nf, 64>;
    def VLSEG#nf#E64FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
      VLSEGFFSched<nf, 64>;
    def VSSEG#nf#E64_V :
      VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
      VSSEGSched<nf, 64>;
    // Vector Strided Segment Instructions
    def VLSSEG#nf#E64_V :
      VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
      VLSSEGSched<nf, 64>;
    def VSSSEG#nf#E64_V :
      VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
      VSSSEGSched<nf, 64>;
  }
} // Predicates = [HasVInstructionsI64]
let Predicates = [HasVInstructionsI64, IsRV64] in {
  foreach nf = 2 - 8 in {
    // Vector Indexed Segment Instructions
    def VLUXSEG #nf #EI64_V
        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                              "vluxseg" #nf #"ei64.v">,
          VLXSEGSched<nf, 64, "U">;
    def VLOXSEG #nf #EI64_V
        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                              "vloxseg" #nf #"ei64.v">,
          VLXSEGSched<nf, 64, "O">;
    def VSUXSEG #nf #EI64_V
        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                               "vsuxseg" #nf #"ei64.v">,
          VSXSEGSched<nf, 64, "U">;
    def VSOXSEG #nf #EI64_V
        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                               "vsoxseg" #nf #"ei64.v">,
          VSXSEGSched<nf, 64, "O">;
  }
} // Predicates = [HasVInstructionsI64, IsRV64]
include "RISCVInstrInfoVPseudos.td"