/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "elf.h"
int arm_arch = __ARM_ARCH;
#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif
/* Used for function call generation. */
#define TCG_TARGET_STACK_ALIGN          8
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_BY_REF
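/* Per AAPCS, 64-bit (and larger) values occupy aligned register pairs,
   hence TCG_CALL_ARG_EVEN for the I64 and I128 argument kinds above. */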
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}
#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#define TCG_REG_GUEST_BASE  TCG_REG_R11
typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;
#define TO_CPSR (1 << 20)
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
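/*
 * These build the ARM shifter operand: the shift type lives in bits
 * [6:5] (LSL/LSR/ASR/ROR), an immediate count in bits [11:7], or a
 * shift register in bits [11:8] with bit 4 set.
 */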
typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,
    INSN_B         = 0x0a000000,
    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,
    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,
    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,
    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,
    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,
    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,
    INSN_DMB_ISH   = 0xf57ff05b,  /* dmb ish */
    INSN_DMB_MCR   = 0xee070fba,  /* mcr p15, 0, r0, c7, c10, 5 */
    INSN_MSRI_CPSR = 0x0360f000,
    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,
    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,
    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,
    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,
    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */
    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,
    INSN_VTST      = 0xf2000810,
    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;
#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static int encode_imm(uint32_t imm);
/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11
/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12
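/*
 * All pc-relative offsets below are biased by -8: reading the ARM PC
 * yields the address of the current instruction plus 8.
 */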
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;
    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}
static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;
    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}
static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);
    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000
#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u
/*
 * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))
/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
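 *
 * Worked example (illustrative): 0x0000ab00 is 0xab rotated right by
 * 24, so it encodes as imm12 = (24 / 2) << 8 | 0xab = 0xcab.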
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;
    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }
    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }
    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;
 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}
static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}
/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}
/* Return true if v32 is a valid 32-bit shifting ones immediate,
   i.e. (imm8 << 8) | 0xff or (imm8 << 16) | 0xffff (cmode 0xc/0xd).  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}
/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
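 *
 * e.g. v32 = 0x00ff0001: MOVI covers 0x00000001 (cmode 0x0, imm8 1)
 * and ORR supplies the 0xff byte at i = 4, i.e. bits [23:16].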
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;
    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }
    return 0;
}
static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
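    /* BLX (immediate): bit 24 (H) carries bit 1 of the Thumb offset. */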
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}
static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}
static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}
static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}
static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
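/* In the insn word, u (bit 23) selects add vs. subtract offset,
   p (bit 24) pre- vs. post-indexing, and w (bit 21) base writeback.  */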
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}
static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}
static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}
static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}
static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}
/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}
static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}
static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}
static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}
static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}
static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}
static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;
    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }
    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }
    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }
    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
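    /*
     * e.g. arg = 0x00ff00ff fails all single-insn checks, but splits
     * into two rotated bytes, yielding
     *     mov  rd, #0x000000ff
     *     eor  rd, rd, #0x00ff0000
     */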
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }
    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
/*
 * Emit the reg,imm form of a data-processing insn; when the constant
 * cannot be encoded directly, encode its inverse and use @opinv.
 * rhs must satisfy the immediate half of the "rIK" constraint.
 */
static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs)
{
    int imm12 = encode_imm(rhs);
    if (imm12 < 0) {
        imm12 = encode_imm_nofail(~rhs);
        opc = opinv;
    }
    tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
}
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc,
                           ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs)
{
    int imm12 = encode_imm(rhs);
    if (imm12 < 0) {
        imm12 = encode_imm_nofail(-rhs);
        opc = opneg;
    }
    tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
}
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}
static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}
static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         TCGReg a2, unsigned ofs, unsigned len)
{
    /* bfi/bfc: lsb in insn bits [11:7], msb = ofs + len - 1 in [20:16] */
    tcg_debug_assert(a0 == a1);
    tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a2
              | (ofs << 7) | ((ofs + len - 1) << 16));
}
static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                          tcg_target_long a2, unsigned ofs, unsigned len)
{
    /* bfi becomes bfc with rn == 15.  */
    tgen_deposit(s, type, a0, a1, 15, ofs, len);
}
static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_O1_I2(r, 0, rZ),
    .out_rrr = tgen_deposit,
    .out_rri = tgen_depositi,
};
static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
                         unsigned ofs, unsigned len)
{
    /* According to gcc, AND can be faster. */
    if (ofs == 0 && len <= 8) {
        tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn,
                        encode_imm_nofail((1 << len) - 1));
        return;
    }
    if (use_armv7_instructions) {
        /* ubfx */
        tcg_out32(s, 0x07e00050 | (COND_AL << 28) | (rd << 12) | rn
                  | (ofs << 7) | ((len - 1) << 16));
        return;
    }
    assert(ofs % 8 == 0);
    switch (len) {
    case 8:
        /* uxtb */
        tcg_out32(s, 0x06ef0070 | (COND_AL << 28) |
                  (rd << 12) | (ofs << 7) | rn);
        break;
    case 16:
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (COND_AL << 28) |
                  (rd << 12) | (ofs << 7) | rn);
        break;
    default:
        g_assert_not_reached();
    }
}
static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extract,
};
static void tgen_sextract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
                          unsigned ofs, unsigned len)
{
    if (use_armv7_instructions) {
        /* sbfx */
        tcg_out32(s, 0x07a00050 | (COND_AL << 28) | (rd << 12) | rn
                  | (ofs << 7) | ((len - 1) << 16));
        return;
    }
    assert(ofs % 8 == 0);
    switch (len) {
    case 8:
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (COND_AL << 28) |
                  (rd << 12) | (ofs << 7) | rn);
        break;
    case 16:
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (COND_AL << 28) |
                  (rd << 12) | (ofs << 7) | rn);
        break;
    default:
        g_assert_not_reached();
    }
}
static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_sextract,
};
static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}
static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}
/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);
    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }
    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}
/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);
    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }
    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}
static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}
static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
    tcg_out_goto_label(s, COND_AL, l);
}
static void tcg_out_mb(TCGContext *s, unsigned a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}
static TCGCond tgen_cmp(TCGContext *s, TCGCond cond, TCGReg a, TCGReg b)
{
    if (is_tst_cond(cond)) {
        tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
        return tcg_tst_eqne_cond(cond);
    }
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, a, b, SHIFT_IMM_LSL(0));
    return cond;
}
static TCGCond tgen_cmpi(TCGContext *s, TCGCond cond, TCGReg a, TCGArg b)
{
    int imm12;
    if (!is_tst_cond(cond)) {
        tcg_out_dat_IN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b);
        return cond;
    }
    /*
     * The compare constraints allow rIN, but TST does not support N.
     * Be prepared to load the constant into a scratch register.
     */
    imm12 = encode_imm(b);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
    } else {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
        tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0,
                        a, TCG_REG_TMP, SHIFT_IMM_LSL(0));
    }
    return tcg_tst_eqne_cond(cond);
}
static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
                           TCGArg b, int b_const)
{
    if (b_const) {
        return tgen_cmpi(s, cond, a, b);
    } else {
        return tgen_cmp(s, cond, a, b);
    }
}
static TCGCond tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGArg bl, bool const_bl, TCGArg bh, bool const_bh)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Similar, but with TST instead of CMP. */
        tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
        return tcg_tst_eqne_cond(cond);
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);
    default:
        g_assert_not_reached();
    }
}
/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}
static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}
static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}
static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}
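/*
 * Emit a NEON modified-immediate insn (the VMOV/VMVN/VORR/VBIC
 * immediate forms, selected by @op and @cmode).  The 8-bit immediate
 * abcdefgh is split across the insn: a -> bit 24, bcd -> bits 18:16,
 * efgh -> bits 3:0.
 */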
static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}
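/*
 * Emit a NEON shift-by-immediate; @l_imm6 is the combined L:imm6
 * field, with L placed in bit 7 and imm6 in bits 21:16.
 */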
static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}
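/*
 * Emit a NEON VLD1/VST1.  These insns have no immediate offset field,
 * so a non-zero @offset is first folded into TCG_REG_TMP; the trailing
 * 0xf in the Rm field selects the no-writeback addressing form.
 */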
static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}
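/*
 * Resolved addressing mode for a guest memory access: @index < 0 means
 * no index register; @index_scratch means the index register may be
 * clobbered by the access itself (e.g. via writeback addressing).
 */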
typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}
static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}
static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }
    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }
    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}
/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -256
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;
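    /*
     * With softmmu, R1 will be loaded with the TLB addend below and is
     * a scratch the access may clobber.  For user-only mode, index off
     * the guest_base register, if in use.
     */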
    if (tcg_use_softmmu) {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addr,
            .index = TCG_REG_R1,
            .index_scratch = true,
        };
    } else {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addr,
            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
            .index_scratch = false,
        };
    }
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;
    if (tcg_use_softmmu) {
        int mem_index = get_mmuidx(oi);
        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                            : offsetof(CPUTLBEntry, addr_write);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
        TCGReg t_addr;
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr;
        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
        /* Extract the tlb index from the address into R0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
        /*
         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
         * Load the tlb comparator into R2 and the fast path addend into R1.
         */
        if (cmp_off == 0) {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
        /* Load the tlb addend.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                        offsetof(CPUTLBEntry, addend));
        /*
         * Check alignment, check comparators.
         * Do this in 2-4 insns.  Use MOVW for v7, if possible,
         * to reduce the number of sequential conditional instructions.
         * Almost all guests have at least 4k pages, which means that we need
         * to clear at least 9 bits even for an 8-byte memory access, which
         * means it isn't worth checking for an immediate operand for BIC.
         *
         * For unaligned accesses, test the page of the last unit of alignment.
         * This leaves the least significant alignment bits unchanged, and
         * they must of course be zero.
         */
        t_addr = addr;
        if (a_mask < s_mask) {
            t_addr = TCG_REG_R0;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                            addr, s_mask - a_mask);
        }
        if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            t_addr, TCG_REG_TMP, 0);
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            TCG_REG_R2, TCG_REG_TMP, 0);
        } else {
            if (a_mask) {
                tcg_debug_assert(a_mask <= 0xff);
                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
            }
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                            SHIFT_IMM_LSR(TARGET_PAGE_BITS));
            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                            0, TCG_REG_R2, TCG_REG_TMP,
                            SHIFT_IMM_LSL(TARGET_PAGE_BITS));
        }
    } else if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr;
        /* We expect alignment to max out at 7. */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
    }
    return ldst;
}
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    TCGReg base;
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);
    switch (opc & MO_SSIZE) {
    case MO_UB:
        if (h.index < 0) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SB:
        if (h.index < 0) {
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UW:
        if (h.index < 0) {
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SW:
        if (h.index < 0) {
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UL:
        if (h.index < 0) {
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it is already aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                break;
            }
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                break;
            }
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                break;
            }
        }
        if (h.index < 0) {
            base = h.base;
            if (datalo == h.base) {
                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
                base = TCG_REG_TMP;
            }
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            break;
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            base = TCG_REG_TMP;
        }
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
                         TCGReg addr, MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;
    ldst = prepare_host_addr(s, &h, addr, oi, true);
    if (ldst) {
        ldst->type = type;
        ldst->datalo_reg = data;
        ldst->datahi_reg = -1;
        /*
         * This is a conditional BL only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
    }
    tcg_out_qemu_ld_direct(s, opc, data, -1, h);
    if (ldst) {
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static const TCGOutOpQemuLdSt outop_qemu_ld = {
    .base.static_constraint = C_O1_I1(r, q),
    .out = tgen_qemu_ld,
};
static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
                          TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;
    ldst = prepare_host_addr(s, &h, addr, oi, true);
    if (ldst) {
        ldst->type = type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;
        /*
         * This is a conditional BL only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
    }
    tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
    if (ldst) {
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
    .base.static_constraint = C_O2_I1(e, p, q),
    .out = tgen_qemu_ld2,
};
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);
    switch (opc & MO_SIZE) {
    case MO_8:
        if (h.index < 0) {
            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_16:
        if (h.index < 0) {
            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_32:
        if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_64:
        /* We used pair allocation for datalo, so it is already aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* STRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
            } else {
                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
            }
        } else if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
            tcg_out_st32_12(s, h.cond, datahi, h.base, 4);
        } else if (h.index_scratch) {
            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
                         TCGReg addr, MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;
    ldst = prepare_host_addr(s, &h, addr, oi, false);
    if (ldst) {
        ldst->type = type;
        ldst->datalo_reg = data;
        ldst->datahi_reg = -1;
        h.cond = COND_EQ;
        tcg_out_qemu_st_direct(s, opc, data, -1, h);
        /* The conditional call is last, as we're going to return here. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_st_direct(s, opc, data, -1, h);
    }
}
static const TCGOutOpQemuLdSt outop_qemu_st = {
    .base.static_constraint = C_O0_I2(q, q),
    .out = tgen_qemu_st,
};
static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
                          TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;
    ldst = prepare_host_addr(s, &h, addr, oi, false);
    if (ldst) {
        ldst->type = type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;
        h.cond = COND_EQ;
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
        /* The conditional call is last, as we're going to return here. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
    }
}
static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
    .base.static_constraint = C_O0_I3(Q, p, q),
    .out = tgen_qemu_st2,
};
static void tcg_out_epilogue(TCGContext *s);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
    tcg_out_epilogue(s);
}
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    uintptr_t i_addr;
    intptr_t i_disp;
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, INSN_NOP);
    /* When branch is out of range, fall through to indirect. */
    i_addr = get_jmp_target_addr(s, which);
    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
    tcg_debug_assert(i_disp < 0);
    if (i_disp >= -0xfff) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
    } else {
        /*
         * The TB is close, but outside the 12 bits addressable by
         * the load.  We can extend this to 20 bits with a sub of a
         * shifted immediate from pc.
         */
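        /*
         * For example, i_disp = -0x12345 gives h = 0x12345, l = -0x345,
         * h + l = 0x12000 (a valid rotated immediate), producing
         * "sub r0, pc, #0x12000; ldr pc, [r0, #-0x345]".
         */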
        int h = -i_disp;
        int l = -(h & 0xfff);
        h = encode_imm_nofail(h + l);
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
    }
    set_jmp_reset_offset(s, which);
}
static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
{
    tcg_out_b_reg(s, COND_AL, a0);
}
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - (jmp_rx + 8);
    tcg_insn_unit insn;
    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextract64(offset, 0, 26)) {
        /* B <addr> */
        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
    } else {
        insn = INSN_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2);
}
static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rIN),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};
static void tgen_addco(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD | TO_CPSR,
                    a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_addco_imm(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                   a0, a1, a2);
}
static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_O1_I2(r, r, rIN),
    .out_rrr = tgen_addco,
    .out_rri = tgen_addco_imm,
};
static void tgen_addci(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_ADC, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_addci_imm(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IK(s, COND_AL, ARITH_ADC, ARITH_SBC, a0, a1, a2);
}
static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_O1_I2(r, r, rIK),
    .out_rrr = tgen_addci,
    .out_rri = tgen_addci_imm,
};
static void tgen_addcio(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_ADC | TO_CPSR,
                    a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_addcio_imm(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IK(s, COND_AL, ARITH_ADC | TO_CPSR, ARITH_SBC | TO_CPSR,
                   a0, a1, a2);
}
static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_O1_I2(r, r, rIK),
    .out_rrr = tgen_addcio,
    .out_rri = tgen_addcio_imm,
};
/* Set C to @c; NZVQ all set to 0. */
static void tcg_out_movi_apsr_c(TCGContext *s, bool c)
{
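    /* C is APSR bit 29; mask 0x80000 makes MSR write only bits 31:24. */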
    int imm12 = encode_imm_nofail(c << 29);
    tcg_out32(s, (COND_AL << 28) | INSN_MSRI_CPSR | 0x80000 | imm12);
}
static void tcg_out_set_carry(TCGContext *s)
{
    tcg_out_movi_apsr_c(s, 1);
}
static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2);
}
static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rIK),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};
static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
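/*
 * TCG clz must return @a2 for a zero input.  ARM CLZ returns 32 for
 * zero, which need not match @a2, so test for zero explicitly; the
 * immediate form below can use CLZ alone when a2 == 32.
 */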
static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
    tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
    tcg_out_mov_reg(s, COND_EQ, a0, a2);
}
static void tgen_clzi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    if (a2 == 32) {
        tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
    } else {
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        tcg_out_movi32(s, COND_EQ, a0, a2);
    }
}
static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, rIK),
    .out_rrr = tgen_clz,
    .out_rri = tgen_clzi,
};
static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_NotImplemented,
};
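/* Compute ctz as clz(rbit(x)), hence the ARMv7 requirement below. */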
static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
    tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
}
static void tgen_ctzi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
    tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
}
static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
{
    return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented;
}
static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctz,
    .out_rrr = tgen_ctz,
    .out_rri = tgen_ctzi,
};
static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
{
    return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
}
static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* sdiv */
    tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
}
static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_idiv,
    .out_rrr = tgen_divs,
};
static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* udiv */
    tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
}
static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_idiv,
    .out_rrr = tgen_divu,
};
static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* mul */
    tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2);
}
static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};
static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (COND_AL << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_O2_I2(r, r, r, r),
    .out_rrrr = tgen_muls2,
};
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (COND_AL << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_O2_I2(r, r, r, r),
    .out_rrrr = tgen_mulu2,
};
static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2));
}
static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};
static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_NotImplemented,
};
static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_rotr(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2));
}
static void tgen_rotri(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f));
}
static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_rotr,
    .out_rri = tgen_rotri,
};
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2));
}
static void tgen_sari(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
                    SHIFT_IMM_ASR(a2 & 0x1f));
}
static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_sar,
    .out_rri = tgen_sari,
};
static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2));
}
static void tgen_shli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
                    SHIFT_IMM_LSL(a2 & 0x1f));
}
static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_shl,
    .out_rri = tgen_shli,
};
static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2));
}
static void tgen_shri(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
                    SHIFT_IMM_LSR(a2 & 0x1f));
}
static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, ri),
    .out_rrr = tgen_shr,
    .out_rri = tgen_shri,
};
static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_subfi(TCGContext *s, TCGType type,
                       TCGReg a0, tcg_target_long a1, TCGReg a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
}
static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, rI, r),
    .out_rrr = tgen_sub,
    .out_rir = tgen_subfi,
};
static void tgen_subbo_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_SUB | TO_CPSR,
                    a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_subbo_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IN(s, COND_AL, ARITH_SUB | TO_CPSR, ARITH_ADD | TO_CPSR,
                   a0, a1, a2);
}
static void tgen_subbo_rir(TCGContext *s, TCGType type,
                           TCGReg a0, tcg_target_long a1, TCGReg a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_RSB | TO_CPSR,
                    a0, a2, encode_imm_nofail(a1));
}
static void tgen_subbo_rii(TCGContext *s, TCGType type,
                           TCGReg a0, tcg_target_long a1, tcg_target_long a2)
{
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
    tgen_subbo_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
}
static const TCGOutOpAddSubCarry outop_subbo = {
    .base.static_constraint = C_O1_I2(r, rI, rIN),
    .out_rrr = tgen_subbo_rrr,
    .out_rri = tgen_subbo_rri,
    .out_rir = tgen_subbo_rir,
    .out_rii = tgen_subbo_rii,
};
static void tgen_subbi_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_SBC,
                    a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_subbi_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IK(s, COND_AL, ARITH_SBC, ARITH_ADC, a0, a1, a2);
}
static void tgen_subbi_rir(TCGContext *s, TCGType type,
                           TCGReg a0, tcg_target_long a1, TCGReg a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_RSC, a0, a2, encode_imm_nofail(a1));
}
static void tgen_subbi_rii(TCGContext *s, TCGType type,
                           TCGReg a0, tcg_target_long a1, tcg_target_long a2)
{
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
    tgen_subbi_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
}
static const TCGOutOpAddSubCarry outop_subbi = {
    .base.static_constraint = C_O1_I2(r, rI, rIK),
    .out_rrr = tgen_subbi_rrr,
    .out_rri = tgen_subbi_rri,
    .out_rir = tgen_subbi_rir,
    .out_rii = tgen_subbi_rii,
};
static void tgen_subbio_rrr(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_SBC | TO_CPSR,
                    a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_subbio_rri(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_IK(s, COND_AL, ARITH_SBC | TO_CPSR, ARITH_ADC | TO_CPSR,
                   a0, a1, a2);
}
static void tgen_subbio_rir(TCGContext *s, TCGType type,
                            TCGReg a0, tcg_target_long a1, TCGReg a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_RSC | TO_CPSR,
                    a0, a2, encode_imm_nofail(a1));
}
static void tgen_subbio_rii(TCGContext *s, TCGType type,
                            TCGReg a0, tcg_target_long a1, tcg_target_long a2)
{
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
    tgen_subbio_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
}
static const TCGOutOpAddSubCarry outop_subbio = {
    .base.static_constraint = C_O1_I2(r, rI, rIK),
    .out_rrr = tgen_subbio_rrr,
    .out_rri = tgen_subbio_rri,
    .out_rir = tgen_subbio_rir,
    .out_rii = tgen_subbio_rii,
};
static void tcg_out_set_borrow(TCGContext *s)
{
    tcg_out_movi_apsr_c(s, 0);  /* borrow = !carry */
}
static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0));
}
static void tgen_xori(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2));
}
static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, rI),
    .out_rrr = tgen_xor,
    .out_rri = tgen_xori,
};
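/*
 * For a sign-extended result use REVSH.  Otherwise REV16 swaps the
 * bytes of each halfword; if the input was not known zero-extended
 * but the output must be, clear the high half afterward.
 */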
static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg rd, TCGReg rn, unsigned flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (COND_AL << 28) | (rd << 12) | rn);
        return;
    }
    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (COND_AL << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        tcg_out_ext16u(s, rd, rd);
    }
}
static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};
static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg rd, TCGReg rn, unsigned flags)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (COND_AL << 28) | (rd << 12) | rn);
}
static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};
static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_NotImplemented,
};
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_subfi(s, type, a0, 0, a1);
}
static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0));
}
static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg a0, TCGReg a1, TCGLabel *l)
{
    cond = tgen_cmp(s, cond, a0, a1);
    tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
}
static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg a0, tcg_target_long a1, TCGLabel *l)
{
    cond = tgen_cmpi(s, cond, a0, a1);
    tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
}
static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, rIN),
    .out_rr = tgen_brcond,
    .out_ri = tgen_brcondi,
};
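/*
 * With flags already set, write 0 into @ret for the inverse of @cond
 * and 1 (or -1 when @neg) for @cond itself.
 */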
static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg)
{
    tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0);
    tcg_out_movi32(s, tcg_cond_to_arm_cond[cond], ret, neg ? -1 : 1);
}
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg a0, TCGReg a1, TCGReg a2)
{
    cond = tgen_cmp(s, cond, a1, a2);
    finish_setcond(s, cond, a0, false);
}
static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
                          TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    cond = tgen_cmpi(s, cond, a1, a2);
    finish_setcond(s, cond, a0, false);
}
static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, rIN),
    .out_rrr = tgen_setcond,
    .out_rri = tgen_setcondi,
};
static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg a0, TCGReg a1, TCGReg a2)
{
    cond = tgen_cmp(s, cond, a1, a2);
    finish_setcond(s, cond, a0, true);
}
static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    cond = tgen_cmpi(s, cond, a1, a2);
    finish_setcond(s, cond, a0, true);
}
static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, rIN),
    .out_rrr = tgen_negsetcond,
    .out_rri = tgen_negsetcondi,
};
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
{
    cond = tcg_out_cmp(s, cond, c1, c2, const_c2);
    tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN,
                    ret, 0, vt, const_vt);
}
static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0),
    .out = tgen_movcond,
};
static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl, TCGArg bh, bool const_bh,
                         TCGLabel *l)
{
    cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
    tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
}
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, rI, rI),
    .out = tgen_brcond2,
};
static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                          TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl,
                          TCGArg bh, bool const_bh)
{
    cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
    finish_setcond(s, cond, ret, false);
}
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, rI, rI),
    .out = tgen_setcond2,
};
static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
                          TCGReg a1, TCGReg a2, unsigned shr)
{
    /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
    tgen_shli(s, TCG_TYPE_I32, TCG_REG_TMP, a2, 32 - shr);
    tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, TCG_REG_TMP,
                    a1, SHIFT_IMM_LSR(shr));
}
static const TCGOutOpExtract2 outop_extract2 = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_extract2,
};
static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg rd,
                      TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpLoad outop_ld8u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8u,
};
static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg rd,
                      TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpLoad outop_ld8s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld8s,
};
static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg rd,
                       TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpLoad outop_ld16u = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16u,
};
static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg rd,
                       TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpLoad outop_ld16s = {
    .base.static_constraint = C_O1_I1(r, r),
    .out = tgen_ld16s,
};
static void tgen_st8(TCGContext *s, TCGType type, TCGReg rd,
                     TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpStore outop_st8 = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tgen_st8,
};
static void tgen_st16(TCGContext *s, TCGType type, TCGReg rd,
                      TCGReg rn, ptrdiff_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, COND_AL, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, COND_AL, rd, rn, offset);
    }
}
static const TCGOutOpStore outop_st16 = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tgen_st16,
};
static const TCGOutOpStore outop_st = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_r = tcg_out_st,
};
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_dup2_vec:
    case INDEX_op_add_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_arm_sshl_vec:
    case INDEX_op_arm_ushl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_arm_sli_vec:
        return C_O1_I2(w, 0, w);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wV);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);
    default:
        return C_NotImplemented;
    }
}
static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
#ifndef use_idiv_instructions
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
#endif
#ifndef use_neon_instructions
        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
#endif
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
        if (arm_arch < 6) {
            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
            exit(EXIT_FAILURE);
        }
    }
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
    if (use_neon_instructions) {
        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
    }
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /*
         * We have only 8-byte alignment for the stack per the ABI.
         * Rather than dynamically re-align the stack, it's easier
         * to simply not request alignment beyond that.  So:
         * regs 2; size 8; align 8
         */
        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
        return;
    case TCG_TYPE_V64:
        /* regs 1; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
        return;
    case TCG_TYPE_V128:
        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
        return;
    default:
        g_assert_not_reached();
    }
}
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    return false;
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
            tcg_out_mov_reg(s, COND_AL, ret, arg);
            return true;
        }
        return false;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        /* "VMOV D,N" is an alias for "VORR D,N,N". */
        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
        return true;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(type == TCG_TYPE_I32);
    tcg_debug_assert(ret < TCG_REG_Q0);
    tcg_out_movi32(s, COND_AL, ret, arg);
}
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    int enc, opc = ARITH_ADD;
    /* All of the easiest immediates to encode are positive. */
    if (imm < 0) {
        imm = -imm;
        opc = ARITH_SUB;
    }
    enc = encode_imm(imm);
    if (enc >= 0) {
        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
    } else {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
    }
}
/* Type is always V128, with I64 elements.  */
static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
{
    /* Move high element into place first. */
    /* VMOV Dd+1, Ds */
    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
    /* Move low element into place; tcg_out_mov will check for nop. */
    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int q = type - TCG_TYPE_V64;
    if (vece == MO_64) {
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rs, rs);
        } else {
            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
        }
    } else if (rs < TCG_REG_Q0) {
        int b = (vece == MO_8);
        int e = (vece == MO_16);
        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
                  encode_vn(rd) | (rs << 12));
    } else {
        int imm4 = 1 << vece;
        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
                  encode_vd(rd) | encode_vm(rs));
    }
    return true;
}
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg base, intptr_t offset)
{
    if (vece == MO_64) {
        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
        if (type == TCG_TYPE_V128) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        int q = type - TCG_TYPE_V64;
        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
                      rd, base, offset);
    }
    return true;
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    int q = type - TCG_TYPE_V64;
    int cmode, imm8, i;
    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
        return;
    }
    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
    return;
 fail_bytes:
    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;
        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }
        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
        return;
    }
    if (vece == MO_32) {
        uint32_t v32 = v64;
        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(~v32, &cmode, &imm8) ||
            is_soimm32(~v32, &cmode, &imm8)) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            return;
        }
        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(~v32, &cmode, &imm8);
        if (i) {
            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
            return;
        }
    }
    /*
     * As a last resort, load from the constant pool.
     */
    if (!q || vece == MO_64) {
        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
        /* VLDR Dd, [pc + offset] */
        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
        if (q) {
            tcg_out_dup2_vec(s, rd, rd, rd);
        }
    } else {
        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
        /* add tmp, pc, offset */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
    }
}
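/*
 * Conditions missing from these tables (e.g. NE, LT, LEU) are
 * synthesized in tcg_out_vec_op by swapping operands or inverting
 * the result.
 */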
static const ARMInsn vec_cmp_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ,
    [TCG_COND_GT] = INSN_VCGT,
    [TCG_COND_GE] = INSN_VCGE,
    [TCG_COND_GTU] = INSN_VCGT_U,
    [TCG_COND_GEU] = INSN_VCGE_U,
};
static const ARMInsn vec_cmp0_insn[16] = {
    [TCG_COND_EQ] = INSN_VCEQ0,
    [TCG_COND_GT] = INSN_VCGT0,
    [TCG_COND_GE] = INSN_VCGE0,
    [TCG_COND_LT] = INSN_VCLT0,
    [TCG_COND_LE] = INSN_VCLE0,
};
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    unsigned q = vecl;
    TCGArg a0, a1, a2, a3;
    int cmode, imm8;
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;
    case INDEX_op_dup2_vec:
        tcg_out_dup2_vec(s, a0, a1, a2);
        return;
    case INDEX_op_abs_vec:
        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
        return;
    case INDEX_op_neg_vec:
        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
        return;
    case INDEX_op_not_vec:
        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
        return;
    case INDEX_op_add_vec:
        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_mul_vec:
        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smax_vec:
        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
        return;
    case INDEX_op_smin_vec:
        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sub_vec:
        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ssadd_vec:
        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
        return;
    case INDEX_op_sssub_vec:
        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umax_vec:
        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_umin_vec:
        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_usadd_vec:
        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_ussub_vec:
        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
        return;
    case INDEX_op_xor_vec:
        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_arm_sshl_vec:
        /*
         * Note that Vm is the data and Vn is the shift count,
         * therefore the arguments appear reversed.
         */
        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
        return;
    case INDEX_op_arm_ushl_vec:
        /* See above. */
        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
        return;
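    /*
     * NEON immediate shifts fold the element width into the count:
     * shift-left-immediate encodes width + count, shift-right-immediate
     * encodes 2 * width - count, with width = 8 << vece bits.
     */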
    case INDEX_op_shli_vec:
        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
        return;
    case INDEX_op_shri_vec:
        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_sari_vec:
        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
        return;
    case INDEX_op_arm_sli_vec:
        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
        return;
    case INDEX_op_andc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_and_vec:
        if (const_args[2]) {
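            /*
             * Find a modified-immediate encoding of ~a2.  Setting bit 0
             * of cmode yields the in-place VBICI form, usable when
             * a0 == a1; otherwise materialize ~a2 with VMVNI and use a
             * plain VAND.
             */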
            is_shimm1632(~a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
        return;
    case INDEX_op_orc_vec:
        if (!const_args[2]) {
            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
            return;
        }
        a2 = ~a2;
        /* fall through */
    case INDEX_op_or_vec:
        if (const_args[2]) {
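            /* Mirror of the and_vec case: cmode | 1 selects VORRI over VMOVI. */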
            is_shimm1632(a2, &cmode, &imm8);
            if (a0 == a1) {
                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
                return;
            }
            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
            a2 = a0;
        }
        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
        return;
    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];
            ARMInsn insn;
            switch (cond) {
            case TCG_COND_NE:
                if (const_args[2]) {
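                    /*
                     * a1 != 0 becomes VTST a1, a1: lanes are set to
                     * all-ones where a1 has any bit set.
                     */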
                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
                } else {
                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
                break;
            case TCG_COND_TSTNE:
            case TCG_COND_TSTEQ:
                if (const_args[2]) {
                    /* (x & 0) != 0 is always false; (x & 0) == 0 is always true. */
                    tcg_out_dupi_vec(s, type, MO_8, a0,
                                     -(cond == TCG_COND_TSTEQ));
                    break;
                }
                tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2);
                if (cond == TCG_COND_TSTEQ) {
                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
                }
                break;
            default:
                if (const_args[2]) {
                    insn = vec_cmp0_insn[cond];
                    if (insn) {
                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
                        return;
                    }
                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
                    a2 = TCG_VEC_TMP;
                }
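                /*
                 * LT/LE and their unsigned variants have no direct NEON
                 * encoding; swap the operands and use GT/GE instead.
                 */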
                insn = vec_cmp_insn[cond];
                if (insn == 0) {
                    TCGArg t;
                    t = a1, a1 = a2, a2 = t;
                    cond = tcg_swap_cond(cond);
                    insn = vec_cmp_insn[cond];
                    tcg_debug_assert(insn != 0);
                }
                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
                break;
            }
        }
        return;
    case INDEX_op_bitsel_vec:
        a3 = args[3];
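        /*
         * bitsel computes a0 = (a1 & a2) | (~a1 & a3).  VBSL, VBIT and
         * VBIF all compute this function; they differ only in which of
         * the three operands must already be in the destination.
         */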
        if (a0 == a3) {
            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
        } else if (a0 == a2) {
            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
        } else {
            tcg_out_mov(s, type, a0, a1);
            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
        }
        return;
    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
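    /*
     * Return 1 if the opcode is supported directly, -1 if it must be
     * expanded via tcg_expand_vec_op, and 0 if it is not supported.
     */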
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    case INDEX_op_abs_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
        return vece < MO_64;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t1, t2, c1;
    TCGArg a2;
    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);
    va_end(va);
    switch (opc) {
    case INDEX_op_shlv_vec:
        /*
         * Merely propagate shlv_vec to arm_ushl_vec.
         * In this way we don't set TCG_TARGET_HAS_shv_vec
         * because everything is done via expansion.
         */
        v2 = temp_tcgv_vec(arg_temp(a2));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        break;
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        /* Right shifts are negative left shifts for NEON.  */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t1, v2);
        if (opc == INDEX_op_shrv_vec) {
            opc = INDEX_op_arm_ushl_vec;
        } else {
            opc = INDEX_op_arm_sshl_vec;
        }
        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
        break;
    case INDEX_op_rotli_vec:
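        /*
         * x rotl c == (x << c) | (x >> (width - c)): compute the right
         * shift into a temp, then merge in the left shift with VSLI.
         */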
        t1 = tcg_temp_new_vec(type);
        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
        tcg_temp_free_vec(t1);
        break;
    case INDEX_op_rotlv_vec:
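        /*
         * x rotl y == (x << y) | (x >> (width - y)).  The right shift
         * becomes a VSHL by the negative count y - width; for y == 0
         * that over-range shift yields zero, so no masking is needed.
         */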
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;
    case INDEX_op_rotrv_vec:
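        /*
         * x rotr y == (x >> y) | (x << (width - y)), with the right
         * shift again expressed as a VSHL by a negative count.
         */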
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for NEON.  */
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */
#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
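/* That is r4-r11 (11 - 4 + 1 registers) plus lr: 9 words, 36 bytes. */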
#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
    /* Reserve callee argument and tcg temp space.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }
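    /*
     * Branch to the TB whose address arrived as the second call argument
     * (r1); the first (r0, the env pointer) was moved to AREG0 above.
     */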
    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
    /*
     * Return path for goto_ptr. Set return value to 0, as exit_tb does,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
    tcg_out_epilogue(s);
}

static void tcg_out_epilogue(TCGContext *s)
{
    /* Release local stack frame.  */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,
    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}