#ifndef _SPARC64_HYPERVISOR_H
#define _SPARC64_HYPERVISOR_H

/* Sun4v hypervisor interfaces and defines.
 *
 * Hypervisor calls are made via traps to software traps number 0x80
 * and above.  Registers %o0 to %o5 serve as argument, status, and
 * return value registers.
 *
 * There are two kinds of these traps.  First there are the normal
 * "fast traps" which use software trap 0x80 and encode the function
 * to invoke by number in register %o5.  Argument and return value
 * handling is as follows:
 *
 * -----------------------------------------------
 * |  %o5  | function number |     undefined     |
 * |  %o0  |   argument 0    |   return status   |
 * |  %o1  |   argument 1    |   return value 1  |
 * |  %o2  |   argument 2    |   return value 2  |
 * |  %o3  |   argument 3    |   return value 3  |
 * |  %o4  |   argument 4    |   return value 4  |
 * -----------------------------------------------
 *
 * The second type are "hyper-fast traps" which encode the function
 * number in the software trap number itself.  So these use trap
 * numbers > 0x80.  The register usage for hyper-fast traps is as
 * follows:
 *
 * -----------------------------------------------
 * |  %o0  |   argument 0    |   return status   |
 * |  %o1  |   argument 1    |   return value 1  |
 * |  %o2  |   argument 2    |   return value 2  |
 * |  %o3  |   argument 3    |   return value 3  |
 * |  %o4  |   argument 4    |   return value 4  |
 * -----------------------------------------------
 *
 * Registers providing explicit arguments to the hypervisor calls
 * are volatile across the call.  Upon return their values are
 * undefined unless explicitly specified as containing a particular
 * return value by the specific call.  The return status is always
 * returned in register %o0, zero indicates a successful execution of
 * the hypervisor call and other values indicate an error status as
 * defined below.  So, for example, if a hyper-fast trap takes
 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
 * the call and %o3, %o4, and %o5 would be preserved.
 *
 * If the hypervisor trap is invalid, or the fast trap function number
 * is invalid, HV_EBADTRAP will be returned in %o0.  Also, all 64 bits
 * of the argument and return values are significant.
 */
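
/* For illustration only (not part of the sun4v API itself): a hedged
 * sketch of how a one-argument fast trap could be issued from C with
 * GCC inline assembly on sparc64.  The function number goes in %o5,
 * argument 0 in %o0, and the return status comes back in %o0.  The
 * unused argument registers are conservatively clobbered here; the
 * kernel's real wrappers (the sun4v_* declarations below) are
 * implemented in assembly instead.  hv_fast_trap1 is a hypothetical
 * name.
 */
#ifndef __ASSEMBLY__
static inline unsigned long hv_fast_trap1(unsigned long func,
					  unsigned long arg0)
{
	register unsigned long func_r __asm__("o5") = func;
	register unsigned long arg_r __asm__("o0") = arg0;

	__asm__ __volatile__("ta	0x80"	/* HV_FAST_TRAP */
			     : "+r" (arg_r), "+r" (func_r)
			     : /* no other inputs */
			     : "o1", "o2", "o3", "o4", "memory");

	return arg_r;	/* return status */
}
#endif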

/* Trap numbers.  */
#define HV_FAST_TRAP		0x80
#define HV_MMU_MAP_ADDR_TRAP	0x83
#define HV_MMU_UNMAP_ADDR_TRAP	0x84
#define HV_TTRACE_ADDENTRY_TRAP	0x85
#define HV_CORE_TRAP		0xff

/* Error codes.  */
#define HV_EOK				0  /* Successful return            */
#define HV_ENOCPU			1  /* Invalid CPU id               */
#define HV_ENORADDR			2  /* Invalid real address         */
#define HV_ENOINTR			3  /* Invalid interrupt id         */
#define HV_EBADPGSZ			4  /* Invalid pagesize encoding    */
#define HV_EBADTSB			5  /* Invalid TSB description      */
#define HV_EINVAL			6  /* Invalid argument             */
#define HV_EBADTRAP			7  /* Invalid function number      */
#define HV_EBADALIGN			8  /* Invalid address alignment    */
#define HV_EWOULDBLOCK			9  /* Cannot complete w/o blocking */
#define HV_ENOACCESS			10 /* No access to resource        */
#define HV_EIO				11 /* I/O error                    */
#define HV_ECPUERROR			12 /* CPU in error state           */
#define HV_ENOTSUPPORTED		13 /* Function not supported       */
#define HV_ENOMAP			14 /* No mapping found             */
#define HV_ETOOMANY			15 /* Too many items specified     */
#define HV_ECHANNEL			16 /* Invalid LDC channel          */
#define HV_EBUSY			17 /* Resource busy                */

/* mach_exit()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_EXIT
 * ARG0:	exit code
 * ERRORS:	This service does not return.
 *
 * Stop all CPUs in the virtual domain and place them into the stopped
 * state.  The 64-bit exit code may be passed to a service entity as
 * the domain's exit status.  On systems without a service entity, the
 * domain will undergo a reset, and the boot firmware will be
 * reloaded.
 *
 * This function will never return to the guest that invokes it.
 *
 * Note: By convention an exit code of zero denotes a successful exit by
 *       the guest code.  A non-zero exit code denotes a guest specific
 *       error indication.
 *
 */
#define HV_FAST_MACH_EXIT		0x00

#ifndef __ASSEMBLY__
extern void sun4v_mach_exit(unsigned long exit_code);
#endif

/* Domain services.  */

/* mach_desc()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_DESC
 * ARG0:	buffer
 * ARG1:	length
 * RET0:	status
 * RET1:	length
 * ERRORS:	HV_EBADALIGN	Buffer is badly aligned
 *		HV_ENORADDR	Buffer is at an illegal real address.
 *		HV_EINVAL	Buffer length is too small for complete
 *				machine description.
 *
 * Copy the most current machine description into the buffer indicated
 * by the real address in ARG0.  The buffer provided must be 16 byte
 * aligned.  Upon success or HV_EINVAL, this service returns the
 * actual size of the machine description in the RET1 return value.
 *
 * Note: A method of determining the appropriate buffer size for the
 *       machine description is to first call this service with a buffer
 *       length of 0 bytes.
 */
#define HV_FAST_MACH_DESC		0x01

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
				     unsigned long buf_len,
				     unsigned long *real_buf_len);
#endif
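
/* The note above leads to a two-call pattern in practice.  A hedged
 * sketch, where alloc_16b_aligned_pa() is a hypothetical helper
 * standing in for however the caller obtains a 16-byte aligned
 * buffer in real memory:
 */
#ifndef __ASSEMBLY__
extern unsigned long alloc_16b_aligned_pa(unsigned long size); /* hypothetical */

static inline unsigned long fetch_mach_desc(unsigned long *buf_pa,
					    unsigned long *md_len)
{
	unsigned long status, len = 0;

	/* First call with a zero-length buffer to learn the size. */
	status = sun4v_mach_desc(0UL, 0UL, &len);
	if (status != HV_EOK && status != HV_EINVAL)
		return status;

	*buf_pa = alloc_16b_aligned_pa(len);
	return sun4v_mach_desc(*buf_pa, len, md_len);
}
#endif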

/* mach_sir()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_SIR
 * ERRORS:	This service does not return.
 *
 * Perform a software initiated reset of the virtual machine domain.
 * All CPUs are captured as soon as possible, all hardware devices are
 * returned to the entry default state, and the domain is restarted at
 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
 * of the CPUs.  The single CPU restarted is selected as determined by
 * platform specific policy.  Memory is preserved across this
 * operation.
 */
#define HV_FAST_MACH_SIR		0x02

#ifndef __ASSEMBLY__
extern void sun4v_mach_sir(void);
#endif

/* mach_set_watchdog()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_SET_WATCHDOG
 * ARG0:	timeout in milliseconds
 * RET0:	status
 * RET1:	time remaining in milliseconds
 *
 * A guest uses this API to set a watchdog timer.  Once the guest has set
 * the timer, it must call the timer service again either to disable or
 * postpone the expiration.  If the timer expires before being reset or
 * disabled, then the hypervisor takes a platform specific action leading
 * to guest termination within a bounded time period.  The platform action
 * may include recovery actions such as reporting the expiration to a
 * Service Processor, and/or automatically restarting the guest.
 *
 * The 'timeout' parameter is specified in milliseconds; however, the
 * implemented granularity is given by the 'watchdog-resolution'
 * property in the 'platform' node of the guest's machine description.
 * The largest allowed timeout value is specified by the
 * 'watchdog-max-timeout' property of the 'platform' node.
 *
 * If the 'timeout' argument is not zero, the watchdog timer is set to
 * expire after a minimum of 'timeout' milliseconds.
 *
 * If the 'timeout' argument is zero, the watchdog timer is disabled.
 *
 * If the 'timeout' value exceeds the value of the 'watchdog-max-timeout'
 * property, the hypervisor leaves the watchdog timer state unchanged,
 * and returns a status of EINVAL.
 *
 * The 'time remaining' return value is valid regardless of whether the
 * return status is EOK or EINVAL.  A non-zero return value indicates the
 * number of milliseconds that were remaining until the timer was to expire.
 * If less than one millisecond remains, the return value is '1'.  If the
 * watchdog timer was disabled at the time of the call, the return value is
 * zero.
 *
 * If the hypervisor cannot support the exact timeout value requested, but
 * can support a larger timeout value, the hypervisor may round the actual
 * timeout to a value larger than the requested timeout, consequently the
 * 'time remaining' return value may be larger than the previously requested
 * timeout value.
 *
 * Any guest OS debugger should be aware that the watchdog service may be in
 * use.  Consequently, it is recommended that the watchdog service is
 * disabled upon debugger entry (e.g. reaching a breakpoint), and then
 * re-enabled upon returning to normal execution.  The API has been designed
 * with this in mind, and the 'time remaining' result of the disable call may
 * be used directly as the timeout argument of the re-enable call.
 */
#define HV_FAST_MACH_SET_WATCHDOG	0x05

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
					     unsigned long *orig_timeout);
#endif
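
/* A hedged sketch of the debugger convention just described: disable
 * on entry, re-arm with the returned time remaining on exit.  The
 * function names are illustrative.
 */
#ifndef __ASSEMBLY__
static inline unsigned long debugger_watchdog_disable(void)
{
	unsigned long remaining;

	/* A timeout of zero disables the watchdog; the time that was
	 * left (zero if it was not armed) comes back in RET1.
	 */
	sun4v_mach_set_watchdog(0, &remaining);
	return remaining;
}

static inline void debugger_watchdog_restore(unsigned long remaining)
{
	unsigned long unused;

	if (remaining)	/* zero: it was not armed on entry */
		sun4v_mach_set_watchdog(remaining, &unused);
}
#endif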

/* CPU services.
 *
 * CPUs represent devices that can execute software threads.  A single
 * chip that contains multiple cores or strands is represented as
 * multiple CPUs with unique CPU identifiers.  CPUs are exported to
 * OBP via the machine description (and to the OS via the OBP device
 * tree).  CPUs are always in one of three states: stopped, running,
 * or error.
 *
 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
 * CPU within a logical domain.  Operations that are to be performed
 * on multiple CPUs specify them via a CPU list.  A CPU list is an
 * array in real memory, of which each 16-bit word is a CPU ID.  CPU
 * lists are passed through the API as two arguments.  The first is
 * the number of entries (16-bit words) in the CPU list, and the
 * second is the (real address) pointer to the CPU ID list.
 */
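
/* For example, a two-entry CPU list as described above might be built
 * like this (a sketch; __pa() stands for the kernel's virtual-to-real
 * address conversion):
 */
#ifndef __ASSEMBLY__
static inline void example_cpu_list(unsigned short *list,
				    unsigned long *num_entries,
				    unsigned long *list_pa)
{
	list[0] = 4;			/* CPU ID 4 */
	list[1] = 5;			/* CPU ID 5 */
	*num_entries = 2;		/* count of 16-bit words */
	*list_pa = __pa(list);		/* real address of the array */
}
#endif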

/* cpu_start()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_START
 * ARG0:	CPU ID
 * ARG1:	PC
 * ARG2:	RTBA
 * ARG3:	target ARG0
 * RET0:	status
 * ERRORS:	ENOCPU		Invalid CPU ID
 *		EINVAL		Target CPU ID is not in the stopped state
 *		ENORADDR	Invalid PC or RTBA real address
 *		EBADALIGN	Unaligned PC or unaligned RTBA
 *		EWOULDBLOCK	Starting resources are not available
 *
 * Start CPU with given CPU ID with PC in %pc and with a real trap
 * base address value of RTBA.  The indicated CPU must be in the
 * stopped state.  The supplied RTBA must be aligned on a 256 byte
 * boundary.  On successful completion, the specified CPU will be in
 * the running state and will be supplied with "target ARG0" in %o0
 * and RTBA in %tba.
 */
#define HV_FAST_CPU_START		0x10

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_start(unsigned long cpuid,
				     unsigned long pc,
				     unsigned long rtba,
				     unsigned long arg0);
#endif

/* cpu_stop()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_STOP
 * ARG0:	CPU ID
 * RET0:	status
 * ERRORS:	ENOCPU		Invalid CPU ID
 *		EINVAL		Target CPU ID is the current cpu
 *		EINVAL		Target CPU ID is not in the running state
 *		EWOULDBLOCK	Stopping resources are not available
 *		ENOTSUPPORTED	Not supported on this platform
 *
 * The specified CPU is stopped.  The indicated CPU must be in the
 * running state.  On completion, it will be in the stopped state.  It
 * is not legal to stop the current CPU.
 *
 * Note: As this service cannot be used to stop the current cpu, this service
 *       may not be used to stop the last running CPU in a domain.  To stop
 *       and exit a running domain, a guest must use the mach_exit() service.
 */
#define HV_FAST_CPU_STOP		0x11

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
#endif

/* cpu_yield()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_YIELD
 * RET0:	status
 * ERRORS:	No possible error.
 *
 * Suspend execution on the current CPU.  Execution will resume when
 * an interrupt (device, %stick_compare, or cross-call) is targeted to
 * the CPU.  On some CPUs, this API may be used by the hypervisor to
 * save power by disabling hardware strands.
 */
#define HV_FAST_CPU_YIELD		0x12

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_yield(void);
#endif

/* cpu_qconf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_QCONF
 * ARG0:	queue
 * ARG1:	base real address
 * ARG2:	number of entries
 * RET0:	status
 * ERRORS:	ENORADDR	Invalid base real address
 *		EINVAL		Invalid queue or number of entries is less
 *				than 2 or too large.
 *		EBADALIGN	Base real address is not correctly aligned
 *				for size.
 *
 * Configure the given queue to be placed at the given base real
 * address, with the given number of entries.  The number of entries
 * must be a power of 2.  The base real address must be aligned
 * exactly to match the queue size.  Each queue entry is 64 bytes
 * long, so for example a 32 entry queue must be aligned on a 2048
 * byte real address boundary.
 *
 * The specified queue is unconfigured if the number of entries is given
 * as zero.
 *
 * For the current version of this API service, the argument queue is defined
 * as follows:
 *
 *	queue		description
 *	-----		-------------------------
 *	0x3c		cpu mondo queue
 *	0x3d		device mondo queue
 *	0x3e		resumable error queue
 *	0x3f		non-resumable error queue
 *
 * Note: The maximum number of entries for each queue for a specific cpu may
 *       be determined from the machine description.
 */
#define HV_FAST_CPU_QCONF		0x14
#define  HV_CPU_QUEUE_CPU_MONDO		 0x3c
#define  HV_CPU_QUEUE_DEVICE_MONDO	 0x3d
#define  HV_CPU_QUEUE_RES_ERROR		 0x3e
#define  HV_CPU_QUEUE_NONRES_ERROR	 0x3f

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_qconf(unsigned long type,
				     unsigned long queue_paddr,
				     unsigned long num_queue_entries);
#endif
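
/* Following the sizing rule above: a 32-entry queue occupies
 * 32 * 64 = 2048 bytes, so its base real address must be 2048-byte
 * aligned.  A hedged sketch of configuring the cpu mondo queue,
 * assuming queue_pa is such an address:
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_cpu_mondo_qconf(unsigned long queue_pa)
{
	/* 32 entries: a power of 2, base aligned to 32 * 64 bytes */
	return sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, queue_pa, 32);
}
#endif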

/* cpu_qinfo()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_QINFO
 * ARG0:	queue
 * RET0:	status
 * RET1:	base real address
 * RET2:	number of entries
 * ERRORS:	EINVAL		Invalid queue
 *
 * Return the configuration info for the given queue.  The base real
 * address and number of entries of the defined queue are returned.
 * The queue argument values are the same as for cpu_qconf() above.
 *
 * If the specified queue is a valid queue number, but no queue has
 * been defined, the number of entries will be set to zero and the
 * base real address returned is undefined.
 */
#define HV_FAST_CPU_QINFO		0x15

/* cpu_mondo_send()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_MONDO_SEND
 * ARG0-1:	CPU list
 * ARG2:	data real address
 * RET0:	status
 * ERRORS:	EBADALIGN	Mondo data is not 64-byte aligned or CPU list
 *				is not 2-byte aligned.
 *		ENORADDR	Invalid data mondo address, or invalid cpu list
 *				address.
 *		ENOCPU		Invalid cpu in CPU list
 *		EWOULDBLOCK	Some or all of the listed CPUs did not receive
 *				the mondo
 *		ECPUERROR	One or more of the listed CPUs are in error
 *				state, use HV_FAST_CPU_STATE to see which ones
 *		EINVAL		CPU list includes caller's CPU ID
 *
 * Send a mondo interrupt to the CPUs in the given CPU list with the
 * 64-bytes at the given data real address.  The data must be 64-byte
 * aligned.  The mondo data will be delivered to the cpu_mondo queues
 * of the recipient CPUs.
 *
 * In all cases, error or not, the CPUs in the CPU list to which the
 * mondo has been successfully delivered will be indicated by having
 * their entry in the CPU list updated with the value 0xffff.
 */
#define HV_FAST_CPU_MONDO_SEND		0x42

#ifndef __ASSEMBLY__
extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
#endif
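
/* A hedged sketch of the retry idiom this convention enables: on
 * EWOULDBLOCK the already-delivered entries read 0xffff, so the same
 * list can be resubmitted until delivery completes (assuming, per the
 * convention above, that 0xffff entries are skipped on resubmission):
 */
#ifndef __ASSEMBLY__
static inline unsigned long mondo_send_retry(unsigned long cnt,
					     unsigned long list_pa,
					     unsigned long data_pa)
{
	unsigned long status;

	do {
		status = sun4v_cpu_mondo_send(cnt, list_pa, data_pa);
	} while (status == HV_EWOULDBLOCK);

	return status;	/* HV_EOK, or e.g. HV_ECPUERROR */
}
#endif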

/* cpu_myid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_MYID
 * RET0:	status
 * RET1:	CPU ID
 * ERRORS:	No errors defined.
 *
 * Return the hypervisor ID handle for the current CPU.  Used by a
 * virtual CPU to discover its own identity.
 */
#define HV_FAST_CPU_MYID		0x16

/* cpu_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_STATE
 * ARG0:	CPU ID
 * RET0:	status
 * RET1:	state
 * ERRORS:	ENOCPU		Invalid CPU ID
 *
 * Retrieve the current state of the CPU with the given CPU ID.
 */
#define HV_FAST_CPU_STATE		0x17
#define  HV_CPU_STATE_STOPPED		 0x01
#define  HV_CPU_STATE_RUNNING		 0x02
#define  HV_CPU_STATE_ERROR		 0x03

#ifndef __ASSEMBLY__
extern long sun4v_cpu_state(unsigned long cpuid);
#endif

/* cpu_set_rtba()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_SET_RTBA
 * ARG0:	RTBA
 * RET0:	status
 * RET1:	previous RTBA
 * ERRORS:	ENORADDR	Invalid RTBA real address
 *		EBADALIGN	RTBA is incorrectly aligned for a trap table
 *
 * Set the real trap base address of the local cpu to the given RTBA.
 * The supplied RTBA must be aligned on a 256 byte boundary.  Upon
 * success the previous value of the RTBA is returned in RET1.
 *
 * Note: This service does not affect %tba
 */
#define HV_FAST_CPU_SET_RTBA		0x18

/* cpu_get_rtba()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_GET_RTBA
 * RET0:	status
 * RET1:	current RTBA
 * ERRORS:	No possible error.
 *
 * Returns the current value of RTBA in RET1.
 */
#define HV_FAST_CPU_GET_RTBA		0x19

/* MMU services.
 *
 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
 */
#ifndef __ASSEMBLY__
struct hv_tsb_descr {
	unsigned short		pgsz_idx;
	unsigned short		assoc;
	unsigned int		num_ttes;	/* in TTEs */
	unsigned int		ctx_idx;
	unsigned int		pgsz_mask;
	unsigned long		tsb_base;
	unsigned long		resv;
};
#endif
#define HV_TSB_DESCR_PGSZ_IDX_OFFSET	0x00
#define HV_TSB_DESCR_ASSOC_OFFSET	0x02
#define HV_TSB_DESCR_NUM_TTES_OFFSET	0x04
#define HV_TSB_DESCR_CTX_IDX_OFFSET	0x08
#define HV_TSB_DESCR_PGSZ_MASK_OFFSET	0x0c
#define HV_TSB_DESCR_TSB_BASE_OFFSET	0x10
#define HV_TSB_DESCR_RESV_OFFSET	0x18

/* Page size bitmask.  */
#define HV_PGSZ_MASK_8K			(1 << 0)
#define HV_PGSZ_MASK_64K		(1 << 1)
#define HV_PGSZ_MASK_512K		(1 << 2)
#define HV_PGSZ_MASK_4MB		(1 << 3)
#define HV_PGSZ_MASK_32MB		(1 << 4)
#define HV_PGSZ_MASK_256MB		(1 << 5)
#define HV_PGSZ_MASK_2GB		(1 << 6)
#define HV_PGSZ_MASK_16GB		(1 << 7)

/* Page size index.  The value given in the TSB descriptor must correspond
 * to the smallest page size specified in the pgsz_mask page size bitmask.
 */
#define HV_PGSZ_IDX_8K			0
#define HV_PGSZ_IDX_64K			1
#define HV_PGSZ_IDX_512K		2
#define HV_PGSZ_IDX_4MB			3
#define HV_PGSZ_IDX_32MB		4
#define HV_PGSZ_IDX_256MB		5
#define HV_PGSZ_IDX_2GB			6
#define HV_PGSZ_IDX_16GB		7
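
/* For illustration, a descriptor for a direct-mapped, 512-entry TSB
 * backed by 8K pages only.  Per the rule above, pgsz_idx names the
 * smallest (here the only) page size set in pgsz_mask; tsb_pa is
 * assumed to be a suitably aligned real address.
 */
#ifndef __ASSEMBLY__
static inline void example_tsb_descr(struct hv_tsb_descr *d,
				     unsigned long tsb_pa)
{
	d->pgsz_idx = HV_PGSZ_IDX_8K;	/* smallest size in the mask */
	d->assoc = 1;			/* direct mapped */
	d->num_ttes = 512;
	d->ctx_idx = 0;			/* context index */
	d->pgsz_mask = HV_PGSZ_MASK_8K;
	d->tsb_base = tsb_pa;
	d->resv = 0;
}
#endif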

/* MMU fault status area.
 *
 * MMU related faults have their status and fault address information
 * placed into a memory region made available by privileged code.  Each
 * virtual processor must make a mmu_fault_area_conf() call to tell the
 * hypervisor where that processor's fault status should be stored.
 *
 * The fault status block is a multiple of 64-bytes and must be aligned
 * on a 64-byte boundary.
 */
#ifndef __ASSEMBLY__
struct hv_fault_status {
	unsigned long		i_fault_type;
	unsigned long		i_fault_addr;
	unsigned long		i_fault_ctx;
	unsigned long		i_reserved[5];
	unsigned long		d_fault_type;
	unsigned long		d_fault_addr;
	unsigned long		d_fault_ctx;
	unsigned long		d_reserved[5];
};
#endif
#define HV_FAULT_I_TYPE_OFFSET	0x00
#define HV_FAULT_I_ADDR_OFFSET	0x08
#define HV_FAULT_I_CTX_OFFSET	0x10
#define HV_FAULT_D_TYPE_OFFSET	0x40
#define HV_FAULT_D_ADDR_OFFSET	0x48
#define HV_FAULT_D_CTX_OFFSET	0x50

#define HV_FAULT_TYPE_FAST_MISS	1
#define HV_FAULT_TYPE_FAST_PROT	2
#define HV_FAULT_TYPE_MMU_MISS	3
#define HV_FAULT_TYPE_INV_RA	4
#define HV_FAULT_TYPE_PRIV_VIOL	5
#define HV_FAULT_TYPE_PROT_VIOL	6
#define HV_FAULT_TYPE_NFO	7
#define HV_FAULT_TYPE_NFO_SEFF	8
#define HV_FAULT_TYPE_INV_VA	9
#define HV_FAULT_TYPE_INV_ASI	10
#define HV_FAULT_TYPE_NC_ATOMIC	11
#define HV_FAULT_TYPE_PRIV_ACT	12
#define HV_FAULT_TYPE_RESV1	13
#define HV_FAULT_TYPE_UNALIGNED	14
#define HV_FAULT_TYPE_INV_PGSZ	15
/* Values 16 --> -2 are reserved.  */
#define HV_FAULT_TYPE_MULTIPLE	-1
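
/* A sketch of how a privileged trap handler might consult the fault
 * status area once it has been registered via mmu_fault_area_conf()
 * below; fsa is assumed to be the CPU's mapping of that 64-byte
 * aligned block.
 */
#ifndef __ASSEMBLY__
static inline int example_dfault_is_fast_miss(struct hv_fault_status *fsa)
{
	/* d_fault_addr and d_fault_ctx identify the faulting
	 * (context, virtual address) pair for the handler.
	 */
	return fsa->d_fault_type == HV_FAULT_TYPE_FAST_MISS;
}
#endif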

/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
 * and mmu_{map,unmap}_perm_addr().
 */
#define HV_MMU_DMMU			0x01
#define HV_MMU_IMMU			0x02
#define HV_MMU_ALL			(HV_MMU_DMMU | HV_MMU_IMMU)

/* mmu_map_addr()
 * TRAP:	HV_MMU_MAP_ADDR_TRAP
 * ARG0:	virtual address
 * ARG1:	mmu context
 * ARG2:	TTE
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
 *		EBADPGSZ	Invalid page size value
 *		ENORADDR	Invalid real address in TTE
 *
 * Create a non-permanent mapping using the given TTE, virtual
 * address, and mmu context.  The flags argument determines which
 * (data, or instruction, or both) TLB the mapping gets loaded into.
 *
 * The behavior is undefined if the valid bit is clear in the TTE.
 *
 * Note: This API call is for privileged code to specify temporary translation
 *       mappings without the need to create and manage a TSB.
 */

/* mmu_unmap_addr()
 * TRAP:	HV_MMU_UNMAP_ADDR_TRAP
 * ARG0:	virtual address
 * ARG1:	mmu context
 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
 * ERRORS:	EINVAL		Invalid virtual address, mmu context, or flags
 *
 * Demaps the given virtual address in the given mmu context on this
 * CPU.  This function is intended to be used to demap pages mapped
 * with mmu_map_addr.  This service is equivalent to invoking
 * mmu_demap_page() with only the current CPU in the CPU list. The
 * flags argument determines which (data, or instruction, or both) TLB
 * the mapping gets unmapped from.
 *
 * Attempting to perform an unmap operation for a previously defined
 * permanent mapping will have undefined results.
 */

/* mmu_tsb_ctx0()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTX0
 * ARG0:	number of TSB descriptions
 * ARG1:	TSB descriptions pointer
 * RET0:	status
 * ERRORS:	ENORADDR		Invalid TSB descriptions pointer or
 *					TSB base within a descriptor
 *		EBADALIGN		TSB descriptions pointer is not aligned
 *					to an 8-byte boundary, or TSB base
 *					within a descriptor is not aligned for
 *					the given TSB size
 *		EBADPGSZ		Invalid page size in a TSB descriptor
 *		EBADTSB			Invalid associativity or size in a TSB
 *					descriptor
 *		EINVAL			Invalid number of TSB descriptions, or
 *					invalid context index in a TSB
 *					descriptor, or index page size not
 *					equal to smallest page size in page
 *					size bitmask field.
 *
 * Configures the TSBs for the current CPU for virtual addresses with
 * context zero.  The TSB descriptions pointer is a pointer to an
 * array of the given number of TSB descriptions.
 *
 * Note: The maximum number of TSBs available to a virtual CPU is given by the
 *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
 *       machine description.
 */
#define HV_FAST_MMU_TSB_CTX0		0x20

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
					unsigned long tsb_desc_ra);
#endif

/* mmu_tsb_ctxnon0()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0
 * ARG0:	number of TSB descriptions
 * ARG1:	TSB descriptions pointer
 * RET0:	status
 * ERRORS:	Same as for mmu_tsb_ctx0() above.
 *
 * Configures the TSBs for the current CPU for virtual addresses with
 * non-zero contexts.  The TSB descriptions pointer is a pointer to an
 * array of the given number of TSB descriptions.
 *
 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
 */
#define HV_FAST_MMU_TSB_CTXNON0		0x21

/* mmu_demap_page()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_PAGE
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	virtual address
 * ARG3:	mmu context
 * ARG4:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid virtual address, context, or
 *					flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps any page mapping of the given virtual address in the given
 * mmu context for the current virtual CPU.  Any virtually tagged
 * caches are guaranteed to be kept consistent.  The flags argument
 * determines which TLB (instruction, or data, or both) participate in
 * the operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_PAGE		0x22

/* mmu_demap_ctx()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_CTX
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	mmu context
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid context or flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps all non-permanent virtual page mappings previously specified
 * for the given context for the current virtual CPU.  Any virtual
 * tagged caches are guaranteed to be kept consistent.  The flags
 * argument determines which TLB (instruction, or data, or both)
 * participate in the operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_CTX		0x23

/* mmu_demap_all()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_DEMAP_ALL
 * ARG0:	reserved, must be zero
 * ARG1:	reserved, must be zero
 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid flags value
 *		ENOTSUPPORTED		ARG0 or ARG1 is non-zero
 *
 * Demaps all non-permanent virtual page mappings previously specified
 * for the current virtual CPU.  Any virtual tagged caches are
 * guaranteed to be kept consistent.  The flags argument determines
 * which TLB (instruction, or data, or both) participate in the
 * operation.
 *
 * ARG0 and ARG1 are both reserved and must be set to zero.
 */
#define HV_FAST_MMU_DEMAP_ALL		0x24

#ifndef __ASSEMBLY__
extern void sun4v_mmu_demap_all(void);
#endif

/* mmu_map_perm_addr()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
 * ARG0:	virtual address
 * ARG1:	reserved, must be zero
 * ARG2:	TTE
 * ARG3:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid virtual address or flags value
 *		EBADPGSZ		Invalid page size value
 *		ENORADDR		Invalid real address in TTE
 *		ETOOMANY		Too many mappings (max of 8 reached)
 *
 * Create a permanent mapping using the given TTE and virtual address
 * for context 0 on the calling virtual CPU.  A maximum of 8 such
 * permanent mappings may be specified by privileged code.  Mappings
 * may be removed with mmu_unmap_perm_addr().
 *
 * The behavior is undefined if a TTE with the valid bit clear is given.
 *
 * Note: This call is used to specify address space mappings for which
 *       privileged code does not expect to receive misses.  For example,
 *       this mechanism can be used to map kernel nucleus code and data.
 */
#define HV_FAST_MMU_MAP_PERM_ADDR	0x25

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
					     unsigned long set_to_zero,
					     unsigned long tte,
					     unsigned long flags);
#endif

/* mmu_fault_area_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_FAULT_AREA_CONF
 * ARG0:	real address
 * RET0:	status
 * RET1:	previous mmu fault area real address
 * ERRORS:	ENORADDR		Invalid real address
 *		EBADALIGN		Invalid alignment for fault area
 *
 * Configure the MMU fault status area for the calling CPU.  A 64-byte
 * aligned real address specifies where MMU fault status information
 * is placed.  The return value is the previously specified area, or 0
 * for the first invocation.  Specifying a fault area at real address
 * 0 is not allowed.
 */
#define HV_FAST_MMU_FAULT_AREA_CONF	0x26

/* mmu_enable()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_ENABLE
 * ARG0:	enable flag
 * ARG1:	return target address
 * RET0:	status
 * ERRORS:	ENORADDR		Invalid real address when disabling
 *					translation.
 *		EBADALIGN		The return target address is not
 *					aligned to an instruction.
 *		EINVAL			The enable flag request the current
 *					operating mode (e.g. disable if already
 *					disabled)
 *
 * Enable or disable virtual address translation for the calling CPU
 * within the virtual machine domain.  If the enable flag is zero,
 * translation is disabled, any non-zero value will enable
 * translation.
 *
 * When this function returns, the newly selected translation mode
 * will be active.  If the mmu is being enabled, then the return
 * target address is a virtual address else it is a real address.
 *
 * Upon successful completion, control will be returned to the given
 * return target address (ie. the cpu will jump to that address).  On
 * failure, the previous mmu mode remains and the trap simply returns
 * as normal with the appropriate error code in RET0.
 */
#define HV_FAST_MMU_ENABLE		0x27

/* mmu_unmap_perm_addr()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_UNMAP_PERM_ADDR
 * ARG0:	virtual address
 * ARG1:	reserved, must be zero
 * ARG2:	flags (HV_MMU_{IMMU,DMMU})
 * RET0:	status
 * ERRORS:	EINVAL			Invalid virtual address or flags value
 *		ENOMAP			Specified mapping was not found
 *
 * Demaps any permanent page mapping (established via
 * mmu_map_perm_addr()) at the given virtual address for context 0 on
 * the current virtual CPU.  Any virtual tagged caches are guaranteed
 * to be kept consistent.
 */
#define HV_FAST_MMU_UNMAP_PERM_ADDR	0x28

/* mmu_tsb_ctx0_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTX0_INFO
 * ARG0:	max TSBs
 * ARG1:	buffer pointer
 * RET0:	status
 * RET1:	number of TSBs
 * ERRORS:	EINVAL			Supplied buffer is too small
 *		EBADALIGN		The buffer pointer is badly aligned
 *		ENORADDR		Invalid real address for buffer pointer
 *
 * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
 * into the provided buffer.  The size of the buffer is given in ARG0
 * in terms of the number of TSB description entries.
 *
 * Upon return, RET1 always contains the number of TSB descriptions
 * previously configured.  If zero TSBs were configured, EOK is
 * returned with RET1 containing 0.
 */
#define HV_FAST_MMU_TSB_CTX0_INFO	0x29

/* mmu_tsb_ctxnon0_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_TSB_CTXNON0_INFO
 * ARG0:	max TSBs
 * ARG1:	buffer pointer
 * RET0:	status
 * RET1:	number of TSBs
 * ERRORS:	EINVAL			Supplied buffer is too small
 *		EBADALIGN		The buffer pointer is badly aligned
 *		ENORADDR		Invalid real address for buffer pointer
 *
 * Return the TSB configuration as previously defined by
 * mmu_tsb_ctxnon0() into the provided buffer.  The size of the buffer
 * is given in ARG0 in terms of the number of TSB description entries.
 *
 * Upon return, RET1 always contains the number of TSB descriptions
 * previously configured.  If zero TSBs were configured, EOK is
 * returned with RET1 containing 0.
 */
#define HV_FAST_MMU_TSB_CTXNON0_INFO	0x2a

/* mmu_fault_area_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMU_FAULT_AREA_INFO
 * RET0:	status
 * RET1:	fault area real address
 * ERRORS:	No errors defined.
 *
 * Return the currently defined MMU fault status area for the current
 * CPU.  The real address of the fault status area is returned in
 * RET1, or 0 is returned in RET1 if no fault status area is defined.
 *
 * Note: mmu_fault_area_conf() may be called with the return value (RET1)
 *       from this service if there is a need to save and restore the fault
 *	 area for a cpu.
 */
#define HV_FAST_MMU_FAULT_AREA_INFO	0x2b

/* Cache and Memory services. */

/* mem_scrub()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MEM_SCRUB
 * ARG0:	real address
 * ARG1:	length
 * RET0:	status
 * RET1:	length scrubbed
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Start address or length are not correctly
 *				aligned
 *		EINVAL		Length is zero
 *
 * Zero the memory contents in the range real address to real address
 * plus length minus 1.  Also, valid ECC will be generated for that
 * memory address range.  Scrubbing is started at the given real
 * address, but may not scrub the entire given length.  The actual
 * length scrubbed will be returned in RET1.
 *
 * The real address and length must be aligned on an 8K boundary, or
 * contain the start address and length from a sun4v error report.
 *
 * Note: There are two uses for this function.  The first use is to block clear
 *       and initialize memory and the second is to scrub an uncorrectable
 *       error reported via a resumable or non-resumable trap.  The second
 *       use requires the arguments to be equal to the real address and length
 *       provided in a sun4v memory error report.
 */
#define HV_FAST_MEM_SCRUB		0x31
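
/* Because the service may scrub less than requested, callers loop on
 * the length reported back in RET1.  A hedged sketch, assuming a C
 * wrapper sun4v_mem_scrub() in the style of the other sun4v_*
 * helpers (no such wrapper is declared in this header):
 */
#ifndef __ASSEMBLY__
extern unsigned long sun4v_mem_scrub(unsigned long paddr,
				     unsigned long len,
				     unsigned long *scrubbed); /* hypothetical */

static inline unsigned long example_scrub_range(unsigned long paddr,
						unsigned long len)
{
	unsigned long status, done;

	while (len) {
		status = sun4v_mem_scrub(paddr, len, &done);
		if (status != HV_EOK)
			return status;
		paddr += done;	/* RET1: length actually scrubbed */
		len -= done;
	}
	return HV_EOK;
}
#endif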

/* mem_sync()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MEM_SYNC
 * ARG0:	real address
 * ARG1:	length
 * RET0:	status
 * RET1:	length synced
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Start address or length are not correctly
 *				aligned
 *		EINVAL		Length is zero
 *
 * Force the next access within the range real address to real address
 * plus length minus 1 to be fetched from main system memory.  Less than
 * the given length may be synced, the actual amount synced is
 * returned in RET1.  The real address and length must be aligned on
 * an 8K boundary.
 */
#define HV_FAST_MEM_SYNC		0x32

/* Time of day services.
 *
 * The hypervisor maintains the time of day on a per-domain basis.
 * Changing the time of day in one domain does not affect the time of
 * day on any other domain.
 *
 * Time is described by a single unsigned 64-bit word which is the
 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
 * 1970).
 */

/* tod_get()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TOD_GET
 * RET0:	status
 * RET1:	TOD
 * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
 *		ENOTSUPPORTED	If TOD not supported on this platform
 *
 * Return the current time of day.  May block if TOD access is
 * temporarily not possible.
 */
#define HV_FAST_TOD_GET			0x50

#ifndef __ASSEMBLY__
extern unsigned long sun4v_tod_get(unsigned long *time);
#endif

/* tod_set()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TOD_SET
 * ARG0:	TOD
 * RET0:	status
 * ERRORS:	EWOULDBLOCK	TOD resource is temporarily unavailable
 *		ENOTSUPPORTED	If TOD not supported on this platform
 *
 * The current time of day is set to the value specified in ARG0.  May
 * block if TOD access is temporarily not possible.
 */
#define HV_FAST_TOD_SET			0x51

#ifndef __ASSEMBLY__
extern unsigned long sun4v_tod_set(unsigned long time);
#endif

/* Console services */

/* con_getchar()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_GETCHAR
 * RET0:	status
 * RET1:	character
 * ERRORS:	EWOULDBLOCK	No character available.
 *
 * Returns a character from the console device.  If no character is
 * available then an EWOULDBLOCK error is returned.  If a character is
 * available, then the returned status is EOK and the character value
 * is in RET1.
 *
 * A virtual BREAK is represented by the 64-bit value -1.
 *
 * A virtual HUP signal is represented by the 64-bit value -2.
 */
#define HV_FAST_CONS_GETCHAR		0x60

/* con_putchar()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_PUTCHAR
 * ARG0:	character
 * RET0:	status
 * ERRORS:	EINVAL		Illegal character
 *		EWOULDBLOCK	Output buffer currently full, would block
 *
 * Send a character to the console device.  Only character values
 * between 0 and 255 may be used.  Values outside this range are
 * invalid except for the 64-bit value -1 which is used to send a
 * virtual BREAK.
 */
#define HV_FAST_CONS_PUTCHAR		0x61

/* con_read()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_READ
 * ARG0:	buffer real address
 * ARG1:	buffer size in bytes
 * RET0:	status
 * RET1:	bytes read or BREAK or HUP
 * ERRORS:	EWOULDBLOCK	No character available.
 *
 * Reads characters into a buffer from the console device.  If no
 * character is available then an EWOULDBLOCK error is returned.
 * If a character is available, then the returned status is EOK
 * and the number of bytes read into the given buffer is provided
 * in RET1.
 *
 * A virtual BREAK is represented by the 64-bit RET1 value -1.
 *
 * A virtual HUP signal is represented by the 64-bit RET1 value -2.
 *
 * If BREAK or HUP are indicated, no bytes were read into the buffer.
 */
#define HV_FAST_CONS_READ		0x62

/* con_write()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CONS_WRITE
 * ARG0:	buffer real address
 * ARG1:	buffer size in bytes
 * RET0:	status
 * RET1:	bytes written
 * ERRORS:	EWOULDBLOCK	Output buffer currently full, would block
 *
 * Send the characters in the buffer to the console device.  Breaks must be
 * sent using con_putchar().
 */
#define HV_FAST_CONS_WRITE		0x63

#ifndef __ASSEMBLY__
extern long sun4v_con_getchar(long *status);
extern long sun4v_con_putchar(long c);
extern long sun4v_con_read(unsigned long buffer,
			   unsigned long size,
			   unsigned long *bytes_read);
extern unsigned long sun4v_con_write(unsigned long buffer,
				     unsigned long size,
				     unsigned long *bytes_written);
#endif
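
/* con_write() may accept fewer bytes than offered and returns
 * EWOULDBLOCK when the output buffer is full, so callers typically
 * loop.  A hedged sketch, with buf_pa the buffer's real address:
 */
#ifndef __ASSEMBLY__
static inline void example_console_write_all(unsigned long buf_pa,
					     unsigned long len)
{
	unsigned long status, written;

	while (len) {
		status = sun4v_con_write(buf_pa, len, &written);
		if (status == HV_EWOULDBLOCK)
			continue;	/* output buffer full: retry */
		if (status != HV_EOK)
			break;		/* give up on real errors */
		buf_pa += written;
		len -= written;
	}
}
#endif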

/* mach_set_soft_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_SET_SOFT_STATE
 * ARG0:	software state
 * ARG1:	software state description pointer
 * RET0:	status
 * ERRORS:	EINVAL		software state not valid or software state
 *				description is not NULL terminated
 *		ENORADDR	software state description pointer is not a
 *				valid real address
 *		EBADALIGN	software state description is not correctly
 *				aligned
 *
 * This allows the guest to report its soft state to the hypervisor.  There
 * are two primary components to this state.  The first part states whether
 * the guest software is running or not.  The second contains optional
 * details specific to the software.
 *
 * The software state argument is defined below in HV_SOFT_STATE_*, and
 * indicates whether the guest is operating normally or in a transitional
 * state.
 *
 * The software state description argument is a real address of a data buffer
 * of size 32 bytes aligned on a 32-byte boundary.  It is treated as a NULL
 * terminated 7-bit ASCII string of up to 31 characters not including the
 * NULL termination.
 */
#define HV_FAST_MACH_SET_SOFT_STATE	0x70
#define  HV_SOFT_STATE_NORMAL		 0x01
#define  HV_SOFT_STATE_TRANSITION	 0x02

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
					       unsigned long msg_string_ra);
#endif

/* mach_get_soft_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MACH_GET_SOFT_STATE
 * ARG0:	software state description pointer
 * RET0:	status
 * RET1:	software state
 * ERRORS:	ENORADDR	software state description pointer is not a
 *				valid real address
 *		EBADALIGN	software state description is not correctly
 *				aligned
 *
 * Retrieve the current value of the guest's software state.  The rules
 * for the software state pointer are the same as for mach_set_soft_state()
 * above.
 */
#define HV_FAST_MACH_GET_SOFT_STATE	0x71

/* svc_send()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SVC_SEND
 * ARG0:	service ID
 * ARG1:	buffer real address
 * ARG2:	buffer size
 * RET0:	STATUS
 * RET1:	sent_bytes
 *
 * Be careful, all output registers are clobbered by this operation,
 * so for example it is not possible to save away a value in %o4
 * across the trap.
 */
#define HV_FAST_SVC_SEND		0x80

/* svc_recv()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SVC_RECV
 * ARG0:	service ID
 * ARG1:	buffer real address
 * ARG2:	buffer size
 * RET0:	STATUS
 * RET1:	recv_bytes
 *
 * Be careful, all output registers are clobbered by this operation,
 * so for example it is not possible to save away a value in %o4
 * across the trap.
 */
#define HV_FAST_SVC_RECV		0x81

/* svc_getstatus()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SVC_GETSTATUS
 * ARG0:	service ID
 * RET0:	STATUS
 * RET1:	status bits
 */
#define HV_FAST_SVC_GETSTATUS		0x82

/* svc_setstatus()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SVC_SETSTATUS
 * ARG0:	service ID
 * ARG1:	bits to set
 * RET0:	STATUS
 */
#define HV_FAST_SVC_SETSTATUS		0x83

/* svc_clrstatus()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SVC_CLRSTATUS
 * ARG0:	service ID
 * ARG1:	bits to clear
 * RET0:	STATUS
 */
#define HV_FAST_SVC_CLRSTATUS		0x84

#ifndef __ASSEMBLY__
extern unsigned long sun4v_svc_send(unsigned long svc_id,
				    unsigned long buffer,
				    unsigned long buffer_size,
				    unsigned long *sent_bytes);
extern unsigned long sun4v_svc_recv(unsigned long svc_id,
				    unsigned long buffer,
				    unsigned long buffer_size,
				    unsigned long *recv_bytes);
extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
					 unsigned long *status_bits);
extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
					 unsigned long status_bits);
extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
					 unsigned long status_bits);
#endif

/* Trap trace services.
 *
 * The hypervisor provides a trap tracing capability for privileged
 * code running on each virtual CPU.  Privileged code provides a
 * round-robin trap trace queue within which the hypervisor writes
 * 64-byte entries detailing hyperprivileged traps taken on behalf of
 * privileged code.  This is provided as a debugging capability for
 * privileged code.
 *
 * The trap trace control structure is 64-bytes long and placed at the
 * start (offset 0) of the trap trace buffer, and is described as
 * follows:
 */
#ifndef __ASSEMBLY__
struct hv_trap_trace_control {
	unsigned long		head_offset;
	unsigned long		tail_offset;
	unsigned long		__reserved[0x30 / sizeof(unsigned long)];
};
#endif
#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET	0x00
#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET	0x08

/* The head offset is the offset of the most recently completed entry
 * in the trap-trace buffer.  The tail offset is the offset of the
 * next entry to be written.  The control structure is owned and
 * modified by the hypervisor.  A guest may not modify the control
 * structure contents.  Attempts to do so will result in undefined
 * behavior for the guest.
 *
 * Each trap trace buffer entry is laid out as follows:
 */
#ifndef __ASSEMBLY__
struct hv_trap_trace_entry {
	unsigned char	type;		/* Hypervisor or guest entry?	*/
	unsigned char	hpstate;	/* Hyper-privileged state	*/
	unsigned char	tl;		/* Trap level			*/
	unsigned char	gl;		/* Global register level	*/
	unsigned short	tt;		/* Trap type			*/
	unsigned short	tag;		/* Extended trap identifier	*/
	unsigned long	tstate;		/* Trap state			*/
	unsigned long	tick;		/* Tick				*/
	unsigned long	tpc;		/* Trap PC			*/
	unsigned long	f1;		/* Entry specific		*/
	unsigned long	f2;		/* Entry specific		*/
	unsigned long	f3;		/* Entry specific		*/
	unsigned long	f4;		/* Entry specific		*/
};
#endif
#define HV_TRAP_TRACE_ENTRY_TYPE	0x00
#define HV_TRAP_TRACE_ENTRY_HPSTATE	0x01
#define HV_TRAP_TRACE_ENTRY_TL		0x02
#define HV_TRAP_TRACE_ENTRY_GL		0x03
#define HV_TRAP_TRACE_ENTRY_TT		0x04
#define HV_TRAP_TRACE_ENTRY_TAG		0x06
#define HV_TRAP_TRACE_ENTRY_TSTATE	0x08
#define HV_TRAP_TRACE_ENTRY_TICK	0x10
#define HV_TRAP_TRACE_ENTRY_TPC		0x18
#define HV_TRAP_TRACE_ENTRY_F1		0x20
#define HV_TRAP_TRACE_ENTRY_F2		0x28
#define HV_TRAP_TRACE_ENTRY_F3		0x30
#define HV_TRAP_TRACE_ENTRY_F4		0x38

/* The type field is encoded as follows.  */
#define HV_TRAP_TYPE_UNDEF		0x00 /* Entry content undefined     */
#define HV_TRAP_TYPE_HV			0x01 /* Hypervisor trap entry       */
#define HV_TRAP_TYPE_GUEST		0xff /* Added via ttrace_addentry() */
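
/* Given the control structure layout above, the most recently
 * completed entry can be located as follows (a sketch; buf is the
 * guest's mapping of the trace buffer, whose first 64 bytes are the
 * control structure):
 */
#ifndef __ASSEMBLY__
static inline struct hv_trap_trace_entry *
example_ttrace_latest_entry(void *buf)
{
	struct hv_trap_trace_control *ctl = buf;

	/* head_offset is a byte offset from the buffer base to the
	 * most recently completed 64-byte entry.
	 */
	return (struct hv_trap_trace_entry *)
		((char *) buf + ctl->head_offset);
}
#endif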

/* ttrace_buf_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_BUF_CONF
 * ARG0:	real address
 * ARG1:	number of entries
 * RET0:	status
 * RET1:	number of entries
 * ERRORS:	ENORADDR	Invalid real address
 *		EINVAL		Size is too small
 *		EBADALIGN	Real address not aligned on 64-byte boundary
 *
 * Requests hypervisor trap tracing and declares a virtual CPU's trap
 * trace buffer to the hypervisor.  The real address supplies the real
 * base address of the trap trace queue and must be 64-byte aligned.
 * Specifying a value of 0 for the number of entries disables trap
 * tracing for the calling virtual CPU.  The buffer allocated must be
 * sized for a power of two number of 64-byte trap trace entries plus
 * an initial 64-byte control structure.
 * 
 * This may be invoked any number of times so that a virtual CPU may
 * relocate a trap trace buffer or create "snapshots" of information.
 *
 * If the real address is illegal or badly aligned, then trap tracing
 * is disabled and an error is returned.
 *
 * Upon failure with EINVAL, this service call returns in RET1 the
 * minimum number of buffer entries required.  Upon other failures
 * RET1 is undefined.
 */
#define HV_FAST_TTRACE_BUF_CONF		0x90

/* ttrace_buf_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_BUF_INFO
 * RET0:	status
 * RET1:	real address
 * RET2:	size
 * ERRORS:	None defined.
 *
 * Returns the size and location of the previously declared trap-trace
 * buffer.  In the event that no buffer was previously defined, or the
 * buffer is disabled, this call will return a size of zero bytes.
 */
#define HV_FAST_TTRACE_BUF_INFO		0x91

/* ttrace_enable()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_ENABLE
 * ARG0:	enable
 * RET0:	status
 * RET1:	previous enable state
 * ERRORS:	EINVAL		No trap trace buffer currently defined
 *
 * Enable or disable trap tracing, and return the previous enabled
 * state in RET1.  Future systems may define various flags for the
 * enable argument (ARG0); for the moment a guest should pass
 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
 * tracing, which will ensure future compatibility.
 */
#define HV_FAST_TTRACE_ENABLE		0x92

/* ttrace_freeze()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_TTRACE_FREEZE
 * ARG0:	freeze
 * RET0:	status
 * RET1:	previous freeze state
 * ERRORS:	EINVAL		No trap trace buffer currently defined
 *
 * Freeze or unfreeze trap tracing, returning the previous freeze
 * state in RET1.  A guest should pass a non-zero value to freeze and
 * a zero value to unfreeze all tracing.  The returned previous state
 * is 0 for not frozen and 1 for frozen.
 */
#define HV_FAST_TTRACE_FREEZE		0x93

/* ttrace_addentry()
 * TRAP:	HV_TTRACE_ADDENTRY_TRAP
 * ARG0:	tag (16-bits)
 * ARG1:	data word 0
 * ARG2:	data word 1
 * ARG3:	data word 2
 * ARG4:	data word 3
 * RET0:	status
 * ERRORS:	EINVAL		No trap trace buffer currently defined
 *
 * Add an entry to the trap trace buffer.  Upon return only ARG0/RET0
 * is modified - none of the other registers holding arguments are
 * volatile across this hypervisor service.
 */

/* Core dump services.
 *
 * Since the hypervisor virtualizes and thus obscures a lot of the
 * physical machine layout and state, traditional OS crash dumps can
 * be difficult to diagnose especially when the problem is a
 * configuration error of some sort.
 *
 * The dump services provide an opaque buffer into which the
 * hypervisor can place its internal state in order to assist in
 * debugging such situations.  The contents are opaque and extremely
 * platform and hypervisor implementation specific.  The guest, during
 * a core dump, requests that the hypervisor update any information in
 * the dump buffer in preparation to being dumped as part of the
 * domain's memory image.
 */

/* dump_buf_update()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_DUMP_BUF_UPDATE
 * ARG0:	real address
 * ARG1:	size
 * RET0:	status
 * RET1:	required size of dump buffer
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Real address is not aligned on a 64-byte
 *				boundary
 *		EINVAL		Size is non-zero but less than minimum size
 *				required
 *		ENOTSUPPORTED	Operation not supported on current logical
 *				domain
 *
 * Declare a domain dump buffer to the hypervisor.  The real address
 * provided for the domain dump buffer must be 64-byte aligned.  The
 * size specifies the size of the dump buffer and may be larger than
 * the minimum size specified in the machine description.  The
 * hypervisor will fill the dump buffer with opaque data.
 *
 * Note: A guest may elect to include dump buffer contents as part of a crash
 *       dump to assist with debugging.  This function may be called any number
 *       of times so that a guest may relocate a dump buffer, or create
 *       "snapshots" of any dump-buffer information.  Each call to
 *       dump_buf_update() atomically declares the new dump buffer to the
 *       hypervisor.
 *
 * A specified size of 0 unconfigures the dump buffer.  If the real
 * address is illegal or badly aligned, then any currently active dump
 * buffer is disabled and an error is returned.
 *
 * In the event that the call fails with EINVAL, RET1 contains the
 * minimum size required by the hypervisor for a valid dump buffer.
 */
#define HV_FAST_DUMP_BUF_UPDATE		0x94
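
/* An illustrative registration loop (hypothetical: this header declares
 * no C wrapper for dump_buf_update(), so "hv_dump_buf_update()" stands
 * in for one returning RET0 and storing RET1, and "alloc_dump_buf()"
 * for an allocator returning a 64-byte aligned real address):
 *
 *	unsigned long status, min_size;
 *
 *	status = hv_dump_buf_update(ra, size, &min_size);
 *	while (status == HV_EINVAL) {
 *		size = min_size;
 *		ra = alloc_dump_buf(size);
 *		status = hv_dump_buf_update(ra, size, &min_size);
 *	}
 */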

/* dump_buf_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_DUMP_BUF_INFO
 * RET0:	status
 * RET1:	real address of current dump buffer
 * RET2:	size of current dump buffer
 * ERRORS:	No errors defined.
 *
 * Return the currently configured dump buffer description.  A
 * returned size of 0 bytes indicates an undefined dump buffer.  In
 * this case the return address in RET1 is undefined.
 */
#define HV_FAST_DUMP_BUF_INFO		0x95

/* Device interrupt services.
 *
 * Device interrupts are allocated to system bus bridges by the hypervisor,
 * and described to OBP in the machine description.  OBP then describes
 * these interrupts to the OS via properties in the device tree.
 *
 * Terminology:
 *
 *	cpuid		Unique opaque value which represents a target cpu.
 *
 *	devhandle	Device handle.  It uniquely identifies a device, and
 *			consists of the lower 28-bits of the hi-cell of the
 *			first entry of the device's "reg" property in the
 *			OBP device tree.
 *
 *	devino		Device interrupt number.  Specifies the relative
 *			interrupt number within the device.  The unique
 *			combination of devhandle and devino are used to
 *			identify a specific device interrupt.
 *
 *			Note: The devino value is the same as the values in the
 *			      "interrupts" property or "interrupt-map" property
 *			      in the OBP device tree for that device.
 *
 *	sysino		System interrupt number.  A 64-bit unsigned integer
 *			representing a unique interrupt within a virtual
 *			machine.
 *
 *	intr_state	A flag representing the interrupt state for a given
 *			sysino.  The state values are defined below.
 *
 *	intr_enabled	A flag representing the 'enabled' state for a given
 *			sysino.  The enable values are defined below.
 */

#define HV_INTR_STATE_IDLE		0 /* Nothing pending */
#define HV_INTR_STATE_RECEIVED		1 /* Interrupt received by hardware */
#define HV_INTR_STATE_DELIVERED		2 /* Interrupt delivered to queue */

#define HV_INTR_DISABLED		0 /* sysino not enabled */
#define HV_INTR_ENABLED			1 /* sysino enabled */

/* intr_devino_to_sysino()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_DEVINO2SYSINO
 * ARG0:	devhandle
 * ARG1:	devino
 * RET0:	status
 * RET1:	sysino
 * ERRORS:	EINVAL		Invalid devhandle/devino
 *
 * Converts the device-specific interrupt number given by the
 * devhandle/devino pair into a system-specific ino (sysino).
 */
#define HV_FAST_INTR_DEVINO2SYSINO	0xa0

#ifndef __ASSEMBLY__
extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
					    unsigned long devino);
#endif
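
/* For example (illustrative; the devhandle and devino values are
 * assumed to come from the OBP device tree, and the wrapper is assumed
 * to return the sysino from RET1):
 *
 *	unsigned long sysino;
 *
 *	sysino = sun4v_devino_to_sysino(devhandle, devino);
 */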

/* intr_getenabled()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_GETENABLED
 * ARG0:	sysino
 * RET0:	status
 * RET1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
 * ERRORS:	EINVAL		Invalid sysino
 *
 * Returns interrupt enabled state in RET1 for the interrupt defined
 * by the given sysino.
 */
#define HV_FAST_INTR_GETENABLED		0xa1

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
#endif

/* intr_setenabled()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_SETENABLED
 * ARG0:	sysino
 * ARG1:	intr_enabled (HV_INTR_{DISABLED,ENABLED})
 * RET0:	status
 * ERRORS:	EINVAL		Invalid sysino or intr_enabled value
 *
 * Set the 'enabled' state of the interrupt sysino.
 */
#define HV_FAST_INTR_SETENABLED		0xa2

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
#endif

/* intr_getstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_GETSTATE
 * ARG0:	sysino
 * RET0:	status
 * RET1:	intr_state (HV_INTR_STATE_*)
 * ERRORS:	EINVAL		Invalid sysino
 *
 * Returns current state of the interrupt defined by the given sysino.
 */
#define HV_FAST_INTR_GETSTATE		0xa3

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_getstate(unsigned long sysino);
#endif

/* intr_setstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_SETSTATE
 * ARG0:	sysino
 * ARG1:	intr_state (HV_INTR_STATE_*)
 * RET0:	status
 * ERRORS:	EINVAL		Invalid sysino or intr_state value
 *
 * Sets the current state of the interrupt described by the given sysino
 * value.
 *
 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
 *       interrupt for sysino.
 */
#define HV_FAST_INTR_SETSTATE		0xa4

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
#endif

/* intr_gettarget()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_GETTARGET
 * ARG0:	sysino
 * RET0:	status
 * RET1:	cpuid
 * ERRORS:	EINVAL		Invalid sysino
 *
 * Returns the CPU that is the current target of the interrupt defined by
 * the given sysino.  The CPU value returned is undefined if the target
 * has not been set via intr_settarget().
 */
#define HV_FAST_INTR_GETTARGET		0xa5

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
#endif

/* intr_settarget()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_INTR_SETTARGET
 * ARG0:	sysino
 * ARG1:	cpuid
 * RET0:	status
 * ERRORS:	EINVAL		Invalid sysino
 *		ENOCPU		Invalid cpuid
 *
 * Set the target CPU for the interrupt defined by the given sysino.
 */
#define HV_FAST_INTR_SETTARGET		0xa6

#ifndef __ASSEMBLY__
extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
#endif
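
/* Taken together, an illustrative bring-up sequence for a device
 * interrupt using the wrappers declared above (status checking elided;
 * "cpuid" is the chosen target CPU):
 *
 *	sun4v_intr_settarget(sysino, cpuid);
 *	sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE);
 *	sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);
 *
 * After servicing an interrupt, setting the sysino back to
 * HV_INTR_STATE_IDLE clears it and re-arms delivery.
 */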

/* vintr_get_cookie()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_GET_COOKIE
 * ARG0:	device handle
 * ARG1:	device ino
 * RET0:	status
 * RET1:	cookie
 */
#define HV_FAST_VINTR_GET_COOKIE	0xa7

/* vintr_set_cookie()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_SET_COOKIE
 * ARG0:	device handle
 * ARG1:	device ino
 * ARG2:	cookie
 * RET0:	status
 */
#define HV_FAST_VINTR_SET_COOKIE	0xa8

/* vintr_get_valid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_GET_VALID
 * ARG0:	device handle
 * ARG1:	device ino
 * RET0:	status
 * RET1:	valid state
 */
#define HV_FAST_VINTR_GET_VALID		0xa9

/* vintr_set_valid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_SET_VALID
 * ARG0:	device handle
 * ARG1:	device ino
 * ARG2:	valid state
 * RET0:	status
 */
#define HV_FAST_VINTR_SET_VALID		0xaa

/* vintr_get_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_GET_STATE
 * ARG0:	device handle
 * ARG1:	device ino
 * RET0:	status
 * RET1:	state
 */
#define HV_FAST_VINTR_GET_STATE		0xab

/* vintr_set_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_SET_STATE
 * ARG0:	device handle
 * ARG1:	device ino
 * ARG2:	state
 * RET0:	status
 */
#define HV_FAST_VINTR_SET_STATE		0xac

/* vintr_get_target()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_GET_TARGET
 * ARG0:	device handle
 * ARG1:	device ino
 * RET0:	status
 * RET1:	cpuid
 */
#define HV_FAST_VINTR_GET_TARGET	0xad

/* vintr_set_target()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_VINTR_SET_TARGET
 * ARG0:	device handle
 * ARG1:	device ino
 * ARG2:	cpuid
 * RET0:	status
 */
#define HV_FAST_VINTR_SET_TARGET	0xae

#ifndef __ASSEMBLY__
extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
					    unsigned long dev_ino,
					    unsigned long *cookie);
extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
					    unsigned long dev_ino,
					    unsigned long cookie);
extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
					   unsigned long dev_ino,
					   unsigned long *valid);
extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
					   unsigned long dev_ino,
					   unsigned long valid);
extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
					   unsigned long dev_ino,
					   unsigned long *state);
extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
					   unsigned long dev_ino,
					   unsigned long state);
extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
					    unsigned long dev_ino,
					    unsigned long *cpuid);
extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
					    unsigned long dev_ino,
					    unsigned long cpuid);
#endif

/* PCI IO services.
 *
 * See the terminology descriptions in the device interrupt services
 * section above as those apply here too.  Here are terminology
 * definitions specific to these PCI IO services:
 *
 *	tsbnum		TSB number.  Identifies which io-tsb is used.
 *			For this version of the specification, tsbnum
 *			must be zero.
 *
 *	tsbindex	TSB index.  Identifies which entry in the TSB
 *			is used.  The first entry is zero.
 *
 *	tsbid		A 64-bit aligned data structure which contains
 *			a tsbnum and a tsbindex.  Bits 63:32 contain the
 *			tsbnum and bits 31:00 contain the tsbindex.
 *
 *			Use the HV_PCI_TSBID() macro to construct such
 * 			values.
 *
 *	io_attributes	IO attributes for IOMMU mappings.  One or more
 *			of the attribute bits are stored in a 64-bit
 *			value.  The values are defined below.
 *
 *	r_addr		64-bit real address
 *
 *	pci_device	PCI device address.  A PCI device address identifies
 *			a specific device on a specific PCI bus segment.
 *			A PCI device address is a 32-bit unsigned integer
 *			with the following format:
 *
 *				00000000.bbbbbbbb.dddddfff.00000000
 *
 *			Use the HV_PCI_DEVICE_BUILD() macro to construct
 *			such values.
 *
 *	pci_config_offset
 *			PCI configuration space offset.  For conventional
 *			PCI a value between 0 and 255.  For extended
 *			configuration space, a value between 0 and 4095.
 *
 *			Note: For PCI configuration space accesses, the offset
 *			      must be aligned to the access size.
 *
 *	error_flag	A return value which specifies if the action succeeded
 *			or failed.  0 means no error, non-0 means some error
 *			occurred while performing the service.
 *
 *	io_sync_direction
 *			Direction definition for pci_dma_sync(), defined
 *			below in HV_PCI_SYNC_*.
 *
 *	io_page_list	A list of io_page_addresses; an io_page_address
 *			is a real address.
 *
 *	io_page_list_p	A pointer to an io_page_list.
 *
 *	"size based byte swap" - Some functions do size based byte swapping
 *				 which allows software to access pointers and
 *				 counters in native form when the processor
 *				 operates in a different endianness than the
 *				 IO bus.  Size-based byte swapping converts a
 *				 multi-byte field between big-endian and
 *				 little-endian format.
 */

#define HV_PCI_MAP_ATTR_READ		0x01
#define HV_PCI_MAP_ATTR_WRITE		0x02

#define HV_PCI_DEVICE_BUILD(b,d,f)	\
	((((b) & 0xff) << 16) | \
	 (((d) & 0x1f) << 11) | \
	 (((f) & 0x07) <<  8))

#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
	((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
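
/* For example (values illustrative):
 *
 *	u32 bdf   = HV_PCI_DEVICE_BUILD(2, 3, 1);
 *	u64 tsbid = HV_PCI_TSBID(0, 42);
 *
 * encodes bus 2, device 3, function 1 as 0x00021900, and entry 42 of
 * TSB number 0 as 0x000000000000002a.
 */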

#define HV_PCI_SYNC_FOR_DEVICE		0x01
#define HV_PCI_SYNC_FOR_CPU		0x02

/* pci_iommu_map()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_IOMMU_MAP
 * ARG0:	devhandle
 * ARG1:	tsbid
 * ARG2:	#ttes
 * ARG3:	io_attributes
 * ARG4:	io_page_list_p
 * RET0:	status
 * RET1:	#ttes mapped
 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex/io_attributes
 *		EBADALIGN	Improperly aligned real address
 *		ENORADDR	Invalid real address
 *
 * Create IOMMU mappings in the sun4v device defined by the given
 * devhandle.  The mappings are created in the TSB defined by the
 * tsbnum component of the given tsbid.  The first mapping is created
 * in the TSB index defined by the tsbindex component of the given tsbid.
 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
 * the second at tsbnum, tsbindex + 1, etc.
 *
 * All mappings are created with the attributes defined by the io_attributes
 * argument.  The page mapping addresses are described in the io_page_list
 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
 * The first entry in the io_page_list is the address for the first iotte, the
 * 2nd for the 2nd iotte, and so on.
 *
 * Each io_page_address in the io_page_list must be appropriately aligned.
 * #ttes must be greater than zero.  For this version of the spec, the tsbnum
 * component of the given tsbid must be zero.
 *
 * Returns the actual number of mappings created, which may be less than
 * or equal to the argument #ttes.  If the function returns a value which
 * is less than #ttes, the caller may continue to call the function with
 * updated tsbid, #ttes, and io_page_list_p arguments until all pages are
 * mapped.
 *
 * Note: This function does not imply an iotte cache flush.  The guest must
 *       demap an entry before re-mapping it.
 */
#define HV_FAST_PCI_IOMMU_MAP		0xb0
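
/* An illustrative mapping loop for the retry behavior described above
 * (hypothetical: this header declares no C wrapper for this call, so
 * "pci_iommu_map()" stands in for one returning RET0 and storing RET1;
 * "pglist_ra" is the real address of the io_page_list, whose entries
 * are assumed to be 8 bytes each):
 *
 *	unsigned long attr = HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE;
 *	unsigned long mapped, done = 0;
 *
 *	while (done < nttes) {
 *		if (pci_iommu_map(devhandle,
 *				  HV_PCI_TSBID(0, tsbindex + done),
 *				  nttes - done, attr,
 *				  pglist_ra + done * 8, &mapped))
 *			break;
 *		done += mapped;
 *	}
 */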

/* pci_iommu_demap()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_IOMMU_DEMAP
 * ARG0:	devhandle
 * ARG1:	tsbid
 * ARG2:	#ttes
 * RET0:	status
 * RET1:	#ttes demapped
 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
 *
 * Demap and flush IOMMU mappings in the device defined by the given
 * devhandle.  Demaps up to #ttes entries in the TSB defined by the tsbnum
 * component of the given tsbid, starting at the TSB index defined by the
 * tsbindex component of the given tsbid.
 *
 * For this version of the spec, the tsbnum of the given tsbid must be zero.
 * #ttes must be greater than zero.
 *
 * Returns the actual number of ttes demapped, which may be less than or equal
 * to the argument #ttes.  If #ttes demapped is less than #ttes, the caller
 * may continue to call this function with updated tsbid and #ttes arguments
 * until all pages are demapped.
 *
 * Note: Entries do not have to be mapped to be demapped.  A demap of an
 *       unmapped page will flush the entry from the tte cache.
 */
#define HV_FAST_PCI_IOMMU_DEMAP		0xb1

/* pci_iommu_getmap()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_IOMMU_GETMAP
 * ARG0:	devhandle
 * ARG1:	tsbid
 * RET0:	status
 * RET1:	io_attributes
 * RET2:	real address
 * ERRORS:	EINVAL		Invalid devhandle/tsbnum/tsbindex
 *		ENOMAP		Mapping is not valid, no translation exists
 *
 * Read and return the mapping in the device described by the given devhandle
 * and tsbid.  If successful, the io_attributes shall be returned in RET1
 * and the page address of the mapping shall be returned in RET2.
 *
 * For this version of the spec, the tsbnum component of the given tsbid
 * must be zero.
 */
#define HV_FAST_PCI_IOMMU_GETMAP	0xb2

/* pci_iommu_getbypass()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_IOMMU_GETBYPASS
 * ARG0:	devhandle
 * ARG1:	real address
 * ARG2:	io_attributes
 * RET0:	status
 * RET1:	io_addr
 * ERRORS:	EINVAL		Invalid devhandle/io_attributes
 *		ENORADDR	Invalid real address
 *		ENOTSUPPORTED	Function not supported in this implementation.
 *
 * Create a "special" mapping in the device described by the given devhandle,
 * for the given real address and attributes.  Return the IO address in RET1
 * if successful.
 */
#define HV_FAST_PCI_IOMMU_GETBYPASS	0xb3

/* pci_config_get()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_CONFIG_GET
 * ARG0:	devhandle
 * ARG1:	pci_device
 * ARG2:	pci_config_offset
 * ARG3:	size
 * RET0:	status
 * RET1:	error_flag
 * RET2:	data
 * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
 *		EBADALIGN	pci_config_offset not size aligned
 *		ENOACCESS	Access to this offset is not permitted
 *
 * Read PCI configuration space for the adapter described by the given
 * devhandle.  Read size (1, 2, or 4) bytes of data from the given
 * pci_device, at pci_config_offset from the beginning of the device's
 * configuration space.  If there was no error, RET1 is set to zero and
 * RET2 is set to the data read.  Insignificant bits in RET2 are not
 * guaranteed to have any specific value and therefore must be ignored.
 *
 * The data returned in RET2 is size based byte swapped.
 *
 * If an error occurs during the read, RET1 is set to a non-zero value.
 * The given pci_config_offset must be 'size' aligned.
 */
#define HV_FAST_PCI_CONFIG_GET		0xb4

/* pci_config_put()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_CONFIG_PUT
 * ARG0:	devhandle
 * ARG1:	pci_device
 * ARG2:	pci_config_offset
 * ARG3:	size
 * ARG4:	data
 * RET0:	status
 * RET1:	error_flag
 * ERRORS:	EINVAL		Invalid devhandle/pci_device/offset/size
 *		EBADALIGN	pci_config_offset not size aligned
 *		ENOACCESS	Access to this offset is not permitted
 *
 * Write PCI configuration space for the adapter described by the given
 * devhandle.  Write size (1, 2, or 4) bytes of data in a single operation,
 * at pci_config_offset from the beginning of the device's configuration
 * space.  The data argument contains the data to be written to configuration
 * space.  Prior to writing, the data is size based byte swapped.
 *
 * If an error occurs during the write access, no error report is
 * generated, but RET1 is set to a non-zero value.  Otherwise RET1 is zero.
 * The given pci_config_offset must be 'size' aligned.
 *
 * This function is permitted to read from offset zero in the configuration
 * space described by the given pci_device if necessary to ensure that the
 * write access to config space completes.
 */
#define HV_FAST_PCI_CONFIG_PUT		0xb5

/* pci_peek()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_PEEK
 * ARG0:	devhandle
 * ARG1:	real address
 * ARG2:	size
 * RET0:	status
 * RET1:	error_flag
 * RET2:	data
 * ERRORS:	EINVAL		Invalid devhandle or size
 *		EBADALIGN	Improperly aligned real address
 *		ENORADDR	Bad real address
 *		ENOACCESS	Guest access prohibited
 *
 * Attempt to read the IO address defined by the given devhandle, real address,
 * and size.  Size must be 1, 2, 4, or 8.  The read is performed as a single
 * access operation using the given size.  If an error occurs when reading
 * from the given location, do not generate an error report, but return a
 * non-zero value in RET1.  If the read was successful, return zero in RET1
 * and return the actual data read in RET2.  The data returned is size based
 * byte swapped.
 *
 * Non-significant bits in RET2 are not guaranteed to have any specific value
 * and therefore must be ignored.  If RET1 is returned as non-zero, the data
 * value is not guaranteed to have any specific value and should be ignored.
 *
 * The caller must have permission to read from the given devhandle, real
 * address, which must be an IO address.  The argument real address must be a
 * size aligned address.
 *
 * The hypervisor implementation of this function must block access to any
 * IO address that the guest does not have explicit permission to access.
 */
#define HV_FAST_PCI_PEEK		0xb6

/* pci_poke()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_POKE
 * ARG0:	devhandle
 * ARG1:	real address
 * ARG2:	size
 * ARG3:	data
 * ARG4:	pci_device
 * RET0:	status
 * RET1:	error_flag
 * ERRORS:	EINVAL		Invalid devhandle, size, or pci_device
 *		EBADALIGN	Improperly aligned real address
 *		ENORADDR	Bad real address
 *		ENOACCESS	Guest access prohibited
 *		ENOTSUPPORTED	Function is not supported by implementation
 *
 * Attempt to write data to the IO address defined by the given devhandle,
 * real address, and size.  Size must be 1, 2, 4, or 8.  The write is
 * performed as a single access operation using the given size.  Prior to
 * writing, the data is size based byte swapped.
 *
 * If an error occurs when writing to the given location, do not generate an
 * error report, but return a non-zero value in RET1.  If the write was
 * successful, return zero in RET1.
 *
 * pci_device describes the configuration address of the device being
 * written to.  The implementation may safely read from offset 0 of
 * the configuration space of the device described by devhandle and
 * pci_device in order to guarantee that the write portion of the
 * operation completes.
 *
 * Any error that occurs due to the read shall be reported using the
 * normal error reporting mechanisms; the read error is not suppressed.
 *
 * The caller must have permission to write to the given devhandle, real
 * address, which must be an IO address.  The argument real address must be a
 * size aligned address.  The caller must have permission to read from
 * the given devhandle, pci_device configuration space offset 0.
 *
 * The hypervisor implementation of this function must block access to any
 * IO address that the guest does not have explicit permission to access.
 */
#define HV_FAST_PCI_POKE		0xb7

/* pci_dma_sync()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_DMA_SYNC
 * ARG0:	devhandle
 * ARG1:	real address
 * ARG2:	size
 * ARG3:	io_sync_direction
 * RET0:	status
 * RET1:	#synced
 * ERRORS:	EINVAL		Invalid devhandle or io_sync_direction
 *		ENORADDR	Bad real address
 *
 * Synchronize a memory region described by the given real address and size,
 * for the device defined by the given devhandle using the direction(s)
 * defined by the given io_sync_direction.  The argument size is the size of
 * the memory region in bytes.
 *
 * Return the actual number of bytes synchronized in the return value #synced,
 * which may be less than or equal to the argument size.  If the return
 * value #synced is less than size, the caller must continue to call this
 * function with updated real address and size arguments until the entire
 * memory region is synchronized.
 */
#define HV_FAST_PCI_DMA_SYNC		0xb8
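
/* An illustrative sync loop for the partial-completion behavior
 * described above (hypothetical: "pci_dma_sync()" stands in for a C
 * wrapper returning RET0 and storing RET1, which this header does not
 * declare):
 *
 *	unsigned long synced, done = 0;
 *
 *	while (done < size) {
 *		if (pci_dma_sync(devhandle, ra + done, size - done,
 *				 HV_PCI_SYNC_FOR_CPU, &synced))
 *			break;
 *		done += synced;
 *	}
 */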

/* PCI MSI services.  */

#define HV_MSITYPE_MSI32		0x00
#define HV_MSITYPE_MSI64		0x01

#define HV_MSIQSTATE_IDLE		0x00
#define HV_MSIQSTATE_ERROR		0x01

#define HV_MSIQ_INVALID			0x00
#define HV_MSIQ_VALID			0x01

#define HV_MSISTATE_IDLE		0x00
#define HV_MSISTATE_DELIVERED		0x01

#define HV_MSIVALID_INVALID		0x00
#define HV_MSIVALID_VALID		0x01

#define HV_PCIE_MSGTYPE_PME_MSG		0x18
#define HV_PCIE_MSGTYPE_PME_ACK_MSG	0x1b
#define HV_PCIE_MSGTYPE_CORR_MSG	0x30
#define HV_PCIE_MSGTYPE_NONFATAL_MSG	0x31
#define HV_PCIE_MSGTYPE_FATAL_MSG	0x33

#define HV_MSG_INVALID			0x00
#define HV_MSG_VALID			0x01

/* pci_msiq_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_CONF
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	real address
 * ARG3:	number of entries
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle, msiqid or nentries
 *		EBADALIGN	Improperly aligned real address
 *		ENORADDR	Bad real address
 *
 * Configure the MSI queue given by the devhandle and msiqid arguments
 * to be placed at the given real address and to be of the given number
 * of entries.  The real address must be aligned exactly to match the
 * queue size.  Each queue entry is 64 bytes long, so, for example, a
 * 32 entry queue must be aligned on a 2048 byte real address boundary.
 * The MSI-EQ Head and Tail are initialized so that the MSI-EQ is
 * 'empty'.
 *
 * Implementation Note: Certain implementations have fixed sized queues.  In
 *                      that case, number of entries must contain the correct
 *                      value.
 */
#define HV_FAST_PCI_MSIQ_CONF		0xc0

/* pci_msiq_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_INFO
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	real address
 * RET2:	number of entries
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Return the configuration information for the MSI queue described
 * by the given devhandle and msiqid.  The base address of the queue
 * is returned in RET1 and the number of entries is returned in RET2.
 * If the queue is unconfigured, the real address is undefined and the
 * number of entries will be returned as zero.
 */
#define HV_FAST_PCI_MSIQ_INFO		0xc1

/* pci_msiq_getvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETVALID
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the valid state of the MSI-EQ described by the given devhandle and
 * msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETVALID	0xc2

/* pci_msiq_setvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETVALID
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqvalid	(HV_MSIQ_VALID or HV_MSIQ_INVALID)
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqvalid
 *				value or MSI EQ is uninitialized
 *
 * Set the valid state of the MSI-EQ described by the given devhandle and
 * msiqid to the given msiqvalid.
 */
#define HV_FAST_PCI_MSIQ_SETVALID	0xc3

/* pci_msiq_getstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETSTATE
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the state of the MSI-EQ described by the given devhandle and
 * msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETSTATE	0xc4

/* pci_msiq_setstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETSTATE
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqstate	(HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqstate
 *				value or MSI EQ is uninitialized
 *
 * Set the state of the MSI-EQ described by the given devhandle and
 * msiqid to the given msiqstate.
 */
#define HV_FAST_PCI_MSIQ_SETSTATE	0xc5

/* pci_msiq_gethead()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETHEAD
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqhead
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the current MSI EQ queue head for the MSI-EQ described by the
 * given devhandle and msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETHEAD	0xc6

/* pci_msiq_sethead()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_SETHEAD
 * ARG0:	devhandle
 * ARG1:	msiqid
 * ARG2:	msiqhead
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msiqid or msiqhead,
 *				or MSI EQ is uninitialized
 *
 * Set the current MSI EQ queue head for the MSI-EQ described by the
 * given devhandle and msiqid.
 */
#define HV_FAST_PCI_MSIQ_SETHEAD	0xc7

/* pci_msiq_gettail()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSIQ_GETTAIL
 * ARG0:	devhandle
 * ARG1:	msiqid
 * RET0:	status
 * RET1:	msiqtail
 * ERRORS:	EINVAL		Invalid devhandle or msiqid
 *
 * Get the current MSI EQ queue tail for the MSI-EQ described by the
 * given devhandle and msiqid.
 */
#define HV_FAST_PCI_MSIQ_GETTAIL	0xc8

/* pci_msi_getvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_GETVALID
 * ARG0:	devhandle
 * ARG1:	msinum
 * RET0:	status
 * RET1:	msivalidstate
 * ERRORS:	EINVAL		Invalid devhandle or msinum
 *
 * Get the current valid/enabled state for the MSI defined by the
 * given devhandle and msinum.
 */
#define HV_FAST_PCI_MSI_GETVALID	0xc9

/* pci_msi_setvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_SETVALID
 * ARG0:	devhandle
 * ARG1:	msinum
 * ARG2:	msivalidstate
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msinum or msivalidstate
 *
 * Set the current valid/enabled state for the MSI defined by the
 * given devhandle and msinum.
 */
#define HV_FAST_PCI_MSI_SETVALID	0xca

/* pci_msi_getmsiq()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_GETMSIQ
 * ARG0:	devhandle
 * ARG1:	msinum
 * RET0:	status
 * RET1:	msiqid
 * ERRORS:	EINVAL		Invalid devhandle or msinum or MSI is unbound
 *
 * Get the MSI EQ that the MSI defined by the given devhandle and
 * msinum is bound to.
 */
#define HV_FAST_PCI_MSI_GETMSIQ		0xcb

/* pci_msi_setmsiq()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_SETMSIQ
 * ARG0:	devhandle
 * ARG1:	msinum
 * ARG2:	msitype
 * ARG3:	msiqid
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msinum or msiqid
 *
 * Set the MSI EQ that the MSI defined by the given devhandle and
 * msinum is bound to.
 */
#define HV_FAST_PCI_MSI_SETMSIQ		0xcc

/* pci_msi_getstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_GETSTATE
 * ARG0:	devhandle
 * ARG1:	msinum
 * RET0:	status
 * RET1:	msistate
 * ERRORS:	EINVAL		Invalid devhandle or msinum
 *
 * Get the state of the MSI defined by the given devhandle and msinum.
 * If not initialized, return HV_MSISTATE_IDLE.
 */
#define HV_FAST_PCI_MSI_GETSTATE	0xcd

/* pci_msi_setstate()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSI_SETSTATE
 * ARG0:	devhandle
 * ARG1:	msinum
 * ARG2:	msistate
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msinum or msistate
 *
 * Set the state of the MSI defined by the given devhandle and msinum.
 */
#define HV_FAST_PCI_MSI_SETSTATE	0xce

/* pci_msg_getmsiq()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSG_GETMSIQ
 * ARG0:	devhandle
 * ARG1:	msgtype
 * RET0:	status
 * RET1:	msiqid
 * ERRORS:	EINVAL		Invalid devhandle or msgtype
 *
 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
 */
#define HV_FAST_PCI_MSG_GETMSIQ		0xd0

/* pci_msg_setmsiq()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSG_SETMSIQ
 * ARG0:	devhandle
 * ARG1:	msgtype
 * ARG2:	msiqid
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle, msgtype, or msiqid
 *
 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
 */
#define HV_FAST_PCI_MSG_SETMSIQ		0xd1

/* pci_msg_getvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSG_GETVALID
 * ARG0:	devhandle
 * ARG1:	msgtype
 * RET0:	status
 * RET1:	msgvalidstate
 * ERRORS:	EINVAL		Invalid devhandle or msgtype
 *
 * Get the valid/enabled state of the MSG defined by the given
 * devhandle and msgtype.
 */
#define HV_FAST_PCI_MSG_GETVALID	0xd2

/* pci_msg_setvalid()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_PCI_MSG_SETVALID
 * ARG0:	devhandle
 * ARG1:	msgtype
 * ARG2:	msgvalidstate
 * RET0:	status
 * ERRORS:	EINVAL		Invalid devhandle or msgtype or msgvalidstate
 *
 * Set the valid/enabled state of the MSG defined by the given
 * devhandle and msgtype.
 */
#define HV_FAST_PCI_MSG_SETVALID	0xd3

/* Logical Domain Channel services.  */

#define LDC_CHANNEL_DOWN		0
#define LDC_CHANNEL_UP			1
#define LDC_CHANNEL_RESETTING		2

/* ldc_tx_qconf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_TX_QCONF
 * ARG0:	channel ID
 * ARG1:	real address base of queue
 * ARG2:	num entries in queue
 * RET0:	status
 *
 * Configure transmit queue for the LDC endpoint specified by the
 * given channel ID, to be placed at the given real address, and
 * be of the given num entries.  Num entries must be a power of two.
 * The real address base of the queue must be aligned on the queue
 * size.  Each queue entry is 64-bytes, so for example, a 32 entry
 * queue must be aligned on a 2048 byte real address boundary.
 *
 * Upon configuration of a valid transmit queue the head and tail
 * pointers are set to a hypervisor specific identical value, indicating
 * that the queue is initially empty.
 *
 * The endpoint's transmit queue is un-configured if num entries is zero.
 *
 * The maximum number of entries for each queue for a specific cpu may be
 * determined from the machine description.  A transmit queue may be
 * specified even in the event that the LDC is down (peer endpoint has no
 * receive queue specified).  Transmission will begin as soon as the peer
 * endpoint defines a receive queue.
 *
 * It is recommended that a guest wait for a transmit queue to empty
 * prior to reconfiguring or un-configuring it.  Re- or un-configuring
 * a non-empty transmit queue behaves exactly as defined above; however,
 * it is undefined how many of the pending entries in the original queue
 * will be delivered prior to the re-configuration taking effect.
 * Furthermore, as the queue configuration causes a reset of the head and
 * tail pointers there is no way for a guest to determine how many entries
 * have been sent after the configuration operation.
 */
#define HV_FAST_LDC_TX_QCONF		0xe0
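
/* A minimal configuration sketch (illustrative; "q_ra" is assumed to be
 * the real address of a queue-sized, queue-aligned buffer, and status
 * checking is elided).  The sun4v_ldc_tx_qconf() wrapper used here is
 * declared at the end of this LDC section; 32 entries of 64 bytes give
 * a 2048-byte, 2048-byte aligned queue:
 *
 *	BUG_ON(q_ra & (32 * 64 - 1));
 *	sun4v_ldc_tx_qconf(channel, q_ra, 32);
 */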

/* ldc_tx_qinfo()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_TX_QINFO
 * ARG0:	channel ID
 * RET0:	status
 * RET1:	real address base of queue
 * RET2:	num entries in queue
 *
 * Return the configuration info for the transmit queue of LDC endpoint
 * defined by the given channel ID.  The real address is the currently
 * defined real address base of the defined queue, and num entries is the
 * size of the queue in terms of number of entries.
 *
 * If the specified channel ID is a valid endpoint number, but no transmit
 * queue has been defined, this service will return success with num
 * entries set to zero and an undefined real address.
 */
#define HV_FAST_LDC_TX_QINFO		0xe1

/* ldc_tx_get_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_TX_GET_STATE
 * ARG0:	channel ID
 * RET0:	status
 * RET1:	head offset
 * RET2:	tail offset
 * RET3:	channel state
 *
 * Return the transmit state, and the head and tail queue pointers, for
 * the transmit queue of the LDC endpoint defined by the given channel ID.
 * The head and tail values are the byte offset of the head and tail
 * positions of the transmit queue for the specified endpoint.
 */
#define HV_FAST_LDC_TX_GET_STATE	0xe2

/* ldc_tx_set_qtail()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_TX_SET_QTAIL
 * ARG0:	channel ID
 * ARG1:	tail offset
 * RET0:	status
 *
 * Update the tail pointer for the transmit queue associated with the LDC
 * endpoint defined by the given channel ID.  The tail offset specified
 * must be aligned on a 64 byte boundary, and calculated so as to increase
 * the number of pending entries on the transmit queue.  Any attempt to
 * decrease the number of pending transmit queue entries is considered
 * an invalid tail offset and will result in an EINVAL error.
 *
 * Since the tail of the transmit queue may not be moved backwards, the
 * transmit queue may be flushed by configuring a new transmit queue,
 * whereupon the hypervisor will configure the initial transmit head and
 * tail pointers to be equal.
 */
#define HV_FAST_LDC_TX_SET_QTAIL	0xe3
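
/* An illustrative send step (hypothetical; "tx_base" is the guest's
 * mapping of the transmit queue, "pkt" a 64-byte datagram, and
 * "num_entries" the size passed to ldc_tx_qconf(); the wrappers are
 * declared at the end of this section):
 *
 *	unsigned long head, tail, state;
 *
 *	sun4v_ldc_tx_get_state(channel, &head, &tail, &state);
 *	memcpy(tx_base + tail, pkt, 64);
 *	tail = (tail + 64) % (num_entries * 64);
 *	sun4v_ldc_tx_set_qtail(channel, tail);
 *
 * A real sender would first verify that advancing the tail does not
 * overtake the head, i.e. that the queue is not full.
 */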

/* ldc_rx_qconf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_RX_QCONF
 * ARG0:	channel ID
 * ARG1:	real address base of queue
 * ARG2:	num entries in queue
 * RET0:	status
 *
 * Configure receive queue for the LDC endpoint specified by the
 * given channel ID, to be placed at the given real address, and
 * be of the given num entries.  Num entries must be a power of two.
 * The real address base of the queue must be aligned on the queue
 * size.  Each queue entry is 64-bytes, so for example, a 32 entry
 * queue must be aligned on a 2048 byte real address boundary.
 *
 * The endpoint's receive queue is un-configured if num entries is zero.
 *
 * If a valid receive queue is specified for a local endpoint the LDC is
 * in the up state for the purpose of transmission to this endpoint.
 *
 * The maximum number of entries for each queue for a specific cpu may be
 * determined from the machine description.
 *
 * As receive queue configuration causes a reset of the queue's head and
 * tail pointers there is no way for a guest to determine how many entries
 * have been received between a preceding ldc_get_rx_state() API call
 * and the completion of the configuration operation.  It should be noted
 * that datagram delivery is not guaranteed via domain channels anyway,
 * and therefore any higher protocol should be resilient to datagram
 * loss if necessary.  However, to overcome this specific race potential
 * it is recommended, for example, that a higher level protocol be employed
 * to ensure either retransmission, or ensure that no datagrams are pending
 * on the peer endpoint's transmit queue prior to the configuration process.
 */
#define HV_FAST_LDC_RX_QCONF		0xe4

/* ldc_rx_qinfo()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_RX_QINFO
 * ARG0:	channel ID
 * RET0:	status
 * RET1:	real address base of queue
 * RET2:	num entries in queue
 *
 * Return the configuration info for the receive queue of LDC endpoint
 * defined by the given channel ID.  The real address is the currently
 * defined real address base of the defined queue, and num entries is the
 * size of the queue in terms of number of entries.
 *
 * If the specified channel ID is a valid endpoint number, but no receive
 * queue has been defined, this service will return success with num
 * entries set to zero and an undefined real address.
 */
#define HV_FAST_LDC_RX_QINFO		0xe5

/* ldc_rx_get_state()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_RX_GET_STATE
 * ARG0:	channel ID
 * RET0:	status
 * RET1:	head offset
 * RET2:	tail offset
 * RET3:	channel state
 *
 * Return the receive state, and the head and tail queue pointers, for
 * the receive queue of the LDC endpoint defined by the given channel ID.
 * The head and tail values are the byte offset of the head and tail
 * positions of the receive queue for the specified endpoint.
 */
#define HV_FAST_LDC_RX_GET_STATE	0xe6

/* ldc_rx_set_qhead()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_RX_SET_QHEAD
 * ARG0:	channel ID
 * ARG1:	head offset
 * RET0:	status
 *
 * Update the head pointer for the receive queue associated with the LDC
 * endpoint defined by the given channel ID.  The head offset specified
 * must be aligned on a 64 byte boundary, and calculated so as to decrease
 * the number of pending entries on the receive queue.  Any attempt to
 * increase the number of pending receive queue entries is considered
 * an invalid head offset and will result in an EINVAL error.
 *
 * The receive queue may be flushed by setting the head offset equal
 * to the current tail offset.
 */
#define HV_FAST_LDC_RX_SET_QHEAD	0xe7
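
/* An illustrative receive loop (hypothetical; "rx_base" is the guest's
 * mapping of the receive queue, "num_entries" its configured size, and
 * consume_pkt() a stand-in for per-datagram processing):
 *
 *	unsigned long head, tail, state;
 *
 *	sun4v_ldc_rx_get_state(channel, &head, &tail, &state);
 *	while (head != tail) {
 *		consume_pkt(rx_base + head);
 *		head = (head + 64) % (num_entries * 64);
 *		sun4v_ldc_rx_set_qhead(channel, head);
 *	}
 */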

/* LDC Map Table Entry.  Each slot is defined by a translation table
 * entry, as specified by the LDC_MTE_* bits below, and a 64-bit
 * hypervisor invalidation cookie.
 */
#define LDC_MTE_PADDR	0x0fffffffffffe000 /* pa[55:13]          */
#define LDC_MTE_COPY_W	0x0000000000000400 /* copy write access  */
#define LDC_MTE_COPY_R	0x0000000000000200 /* copy read access   */
#define LDC_MTE_IOMMU_W	0x0000000000000100 /* IOMMU write access */
#define LDC_MTE_IOMMU_R	0x0000000000000080 /* IOMMU read access  */
#define LDC_MTE_EXEC	0x0000000000000040 /* execute            */
#define LDC_MTE_WRITE	0x0000000000000020 /* write              */
#define LDC_MTE_READ	0x0000000000000010 /* read               */
#define LDC_MTE_SZALL	0x000000000000000f /* page size bits     */
#define LDC_MTE_SZ16GB	0x0000000000000007 /* 16GB page          */
#define LDC_MTE_SZ2GB	0x0000000000000006 /* 2GB page           */
#define LDC_MTE_SZ256MB	0x0000000000000005 /* 256MB page         */
#define LDC_MTE_SZ32MB	0x0000000000000004 /* 32MB page          */
#define LDC_MTE_SZ4MB	0x0000000000000003 /* 4MB page           */
#define LDC_MTE_SZ512K	0x0000000000000002 /* 512K page          */
#define LDC_MTE_SZ64K	0x0000000000000001 /* 64K page           */
#define LDC_MTE_SZ8K	0x0000000000000000 /* 8K page            */

#ifndef __ASSEMBLY__
struct ldc_mtable_entry {
	unsigned long	mte;
	unsigned long	cookie;
};
#endif
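
/* For example (illustrative), an 8K entry granting copy read/write
 * access to the page at real address "pa" could be built as follows;
 * the cookie field is simply cleared in this sketch:
 *
 *	struct ldc_mtable_entry ent;
 *
 *	ent.mte = (pa & LDC_MTE_PADDR) | LDC_MTE_COPY_R |
 *		  LDC_MTE_COPY_W | LDC_MTE_SZ8K;
 *	ent.cookie = 0;
 */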

/* ldc_set_map_table()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_SET_MAP_TABLE
 * ARG0:	channel ID
 * ARG1:	table real address
 * ARG2:	num entries
 * RET0:	status
 *
 * Register the MTE table at the given table real address, with the
 * specified num entries, for the LDC indicated by the given channel
 * ID.
 */
#define HV_FAST_LDC_SET_MAP_TABLE	0xea

/* ldc_get_map_table()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_GET_MAP_TABLE
 * ARG0:	channel ID
 * RET0:	status
 * RET1:	table real address
 * RET2:	num entries
 *
 * Return the configuration of the current mapping table registered
 * for the given channel ID.
 */
#define HV_FAST_LDC_GET_MAP_TABLE	0xeb

#define LDC_COPY_IN	0
#define LDC_COPY_OUT	1

/* ldc_copy()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_COPY
 * ARG0:	channel ID
 * ARG1:	LDC_COPY_* direction code
 * ARG2:	target real address
 * ARG3:	local real address
 * ARG4:	length in bytes
 * RET0:	status
 * RET1:	actual length in bytes
 */
#define HV_FAST_LDC_COPY		0xec

#define LDC_MEM_READ	1
#define LDC_MEM_WRITE	2
#define LDC_MEM_EXEC	4

/* ldc_mapin()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_MAPIN
 * ARG0:	channel ID
 * ARG1:	cookie
 * RET0:	status
 * RET1:	real address
 * RET2:	LDC_MEM_* permissions
 */
#define HV_FAST_LDC_MAPIN		0xed

/* ldc_unmap()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_UNMAP
 * ARG0:	real address
 * RET0:	status
 */
#define HV_FAST_LDC_UNMAP		0xee

/* ldc_revoke()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_LDC_REVOKE
 * ARG0:	channel ID
 * ARG1:	cookie
 * ARG2:	ldc_mtable_entry cookie
 * RET0:	status
 */
#define HV_FAST_LDC_REVOKE		0xef

#ifndef __ASSEMBLY__
extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
					unsigned long ra,
					unsigned long num_entries);
extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
					unsigned long *ra,
					unsigned long *num_entries);
extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
					    unsigned long *head_off,
					    unsigned long *tail_off,
					    unsigned long *chan_state);
extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
					    unsigned long tail_off);
extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
					unsigned long ra,
					unsigned long num_entries);
extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
					unsigned long *ra,
					unsigned long *num_entries);
extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
					    unsigned long *head_off,
					    unsigned long *tail_off,
					    unsigned long *chan_state);
extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
					    unsigned long head_off);
extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
					     unsigned long ra,
					     unsigned long num_entries);
extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
					     unsigned long *ra,
					     unsigned long *num_entries);
extern unsigned long sun4v_ldc_copy(unsigned long channel,
				    unsigned long dir_code,
				    unsigned long tgt_raddr,
				    unsigned long lcl_raddr,
				    unsigned long len,
				    unsigned long *actual_len);
extern unsigned long sun4v_ldc_mapin(unsigned long channel,
				     unsigned long cookie,
				     unsigned long *ra,
				     unsigned long *perm);
extern unsigned long sun4v_ldc_unmap(unsigned long ra);
extern unsigned long sun4v_ldc_revoke(unsigned long channel,
				      unsigned long cookie,
				      unsigned long mte_cookie);
#endif

/* Performance counter services.  */

#define HV_PERF_JBUS_PERF_CTRL_REG	0x00
#define HV_PERF_JBUS_PERF_CNT_REG	0x01
#define HV_PERF_DRAM_PERF_CTRL_REG_0	0x02
#define HV_PERF_DRAM_PERF_CNT_REG_0	0x03
#define HV_PERF_DRAM_PERF_CTRL_REG_1	0x04
#define HV_PERF_DRAM_PERF_CNT_REG_1	0x05
#define HV_PERF_DRAM_PERF_CTRL_REG_2	0x06
#define HV_PERF_DRAM_PERF_CNT_REG_2	0x07
#define HV_PERF_DRAM_PERF_CTRL_REG_3	0x08
#define HV_PERF_DRAM_PERF_CNT_REG_3	0x09

/* get_perfreg()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_GET_PERFREG
 * ARG0:	performance reg number
 * RET0:	status
 * RET1:	performance reg value
 * ERRORS:	EINVAL		Invalid performance register number
 *		ENOACCESS	No access allowed to performance counters
 *
 * Read the value of the given DRAM/JBUS performance counter/control register.
 */
#define HV_FAST_GET_PERFREG		0x100

/* set_perfreg()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_SET_PERFREG
 * ARG0:	performance reg number
 * ARG1:	performance reg value
 * RET0:	status
 * ERRORS:	EINVAL		Invalid performance register number
 *		ENOACCESS	No access allowed to performance counters
 *
 * Write the given performance reg value to the given DRAM/JBUS
 * performance counter/control register.
 */
#define HV_FAST_SET_PERFREG		0x101

/* MMU statistics services.
 *
 * The hypervisor maintains MMU statistics and privileged code provides
 * a buffer where these statistics can be collected.  The buffer is
 * continually updated once configured.  The layout is as follows:
 */
#ifndef __ASSEMBLY__
struct hv_mmu_statistics {
	unsigned long immu_tsb_hits_ctx0_8k_tte;
	unsigned long immu_tsb_ticks_ctx0_8k_tte;
	unsigned long immu_tsb_hits_ctx0_64k_tte;
	unsigned long immu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved1[2];
	unsigned long immu_tsb_hits_ctx0_4mb_tte;
	unsigned long immu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved2[2];
	unsigned long immu_tsb_hits_ctx0_256mb_tte;
	unsigned long immu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved3[4];
	unsigned long immu_tsb_hits_ctxnon0_8k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long immu_tsb_hits_ctxnon0_64k_tte;
	unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved4[2];
	unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved5[2];
	unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved6[4];
	unsigned long dmmu_tsb_hits_ctx0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
	unsigned long dmmu_tsb_hits_ctx0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
	unsigned long __reserved7[2];
	unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
	unsigned long __reserved8[2];
	unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
	unsigned long __reserved9[4];
	unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
	unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
	unsigned long __reserved10[2];
	unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
	unsigned long __reserved11[2];
	unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
	unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
	unsigned long __reserved12[4];
};
#endif

/* mmustat_conf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMUSTAT_CONF
 * ARG0:	real address
 * RET0:	status
 * RET1:	real address
 * ERRORS:	ENORADDR	Invalid real address
 *		EBADALIGN	Real address not aligned on 64-byte boundary
 *		EBADTRAP	API not supported on this processor
 *
 * Enable MMU statistic gathering using the buffer at the given real
 * address on the current virtual CPU.  The new buffer real address
 * is given in ARG0, and the previously specified buffer real address
 * is returned in RET1, or is returned as zero for the first invocation.
 *
 * If the passed in real address argument is zero, this will disable
 * MMU statistic collection on the current virtual CPU.  If an error is
 * returned then no statistics are collected.
 *
 * The buffer contents should be initialized to all zeros before being
 * given to the hypervisor or else the statistics will be meaningless.
 */
#define HV_FAST_MMUSTAT_CONF		0x102

/* mmustat_info()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_MMUSTAT_INFO
 * RET0:	status
 * RET1:	real address
 * ERRORS:	EBADTRAP	API not supported on this processor
 *
 * Return the current state and real address of the currently configured
 * MMU statistics buffer on the current virtual CPU.
 */
#define HV_FAST_MMUSTAT_INFO		0x103

#ifndef __ASSEMBLY__
extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
extern unsigned long sun4v_mmustat_info(unsigned long *ra);
#endif
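
/* A minimal setup sketch (illustrative; __pa() is assumed to yield the
 * buffer's real address, and error checking is elided):
 *
 *	static struct hv_mmu_statistics stats __attribute__((aligned(64)));
 *	unsigned long prev_ra;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	sun4v_mmustat_conf(__pa(&stats), &prev_ra);
 */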

/* NCS crypto services  */

/* ncs_request() sub-function numbers */
#define HV_NCS_QCONF			0x01
#define HV_NCS_QTAIL_UPDATE		0x02

#ifndef __ASSEMBLY__
struct hv_ncs_queue_entry {
	/* MAU Control Register */
	unsigned long	mau_control;
#define MAU_CONTROL_INV_PARITY	0x0000000000002000
#define MAU_CONTROL_STRAND	0x0000000000001800
#define MAU_CONTROL_BUSY	0x0000000000000400
#define MAU_CONTROL_INT		0x0000000000000200
#define MAU_CONTROL_OP		0x00000000000001c0
#define MAU_CONTROL_OP_SHIFT	6
#define MAU_OP_LOAD_MA_MEMORY	0x0
#define MAU_OP_STORE_MA_MEMORY	0x1
#define MAU_OP_MODULAR_MULT	0x2
#define MAU_OP_MODULAR_REDUCE	0x3
#define MAU_OP_MODULAR_EXP_LOOP	0x4
#define MAU_CONTROL_LEN		0x000000000000003f
#define MAU_CONTROL_LEN_SHIFT	0

	/* Real address of the bytes to load into or store
	 * out of the MAU.
	 */
	unsigned long	mau_mpa;

	/* Modular Arithmetic MA Offset Register.  */
	unsigned long	mau_ma;

	/* Modular Arithmetic N Prime Register.  */
	unsigned long	mau_np;
};

struct hv_ncs_qconf_arg {
	unsigned long	mid;      /* MAU ID, 1 per core on Niagara */
	unsigned long	base;     /* Real address base of queue */
	unsigned long	end;	  /* Real address end of queue */
	unsigned long	num_ents; /* Number of entries in queue */
};

struct hv_ncs_qtail_update_arg {
	unsigned long	mid;      /* MAU ID, 1 per core on Niagara */
	unsigned long	tail;     /* New tail index to use */
	unsigned long	syncflag; /* only SYNCFLAG_SYNC is implemented */
#define HV_NCS_SYNCFLAG_SYNC	0x00
#define HV_NCS_SYNCFLAG_ASYNC	0x01
};
#endif

/* ncs_request()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_NCS_REQUEST
 * ARG0:	NCS sub-function
 * ARG1:	sub-function argument real address
 * ARG2:	size in bytes of sub-function argument
 * RET0:	status
 *
 * The MAU chip of the Niagara processor is not directly accessible
 * to privileged code; instead it is programmed indirectly via this
 * hypervisor API.
 *
 * The interface defines a queue of MAU operations to perform.
 * Privileged code registers a queue with the hypervisor by invoking
 * this HVAPI with the HV_NCS_QCONF sub-function, which defines the
 * base, end, and number of entries of the queue.  Each queue entry
 * contains a MAU register struct block.
 *
 * The privileged code then proceeds to add entries to the queue and
 * then invokes the HV_NCS_QTAIL_UPDATE sub-function.  Since only
 * synchronous operations are supported by the current hypervisor,
 * HV_NCS_QTAIL_UPDATE will run all the pending queue entries to
 * completion and return HV_EOK, or return an error code.
 *
 * The real address of the sub-function argument must be aligned on at
 * least an 8-byte boundary.
 *
 * The tail argument of HV_NCS_QTAIL_UPDATE is an index, not a byte
 * offset, into the queue and must be less than or equal to the
 * 'num_ents' argument given in the HV_NCS_QCONF call.
 */
#define HV_FAST_NCS_REQUEST		0x110

#ifndef __ASSEMBLY__
extern unsigned long sun4v_ncs_request(unsigned long request,
				       unsigned long arg_ra,
				       unsigned long arg_size);
#endif
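
/* An illustrative queue registration (error checking elided; "q_ra" and
 * "q_end" are assumed to bound a queue of 64 hv_ncs_queue_entry structs,
 * and __pa() to yield the argument's real address):
 *
 *	struct hv_ncs_qconf_arg qc = {
 *		.mid		= 0,
 *		.base		= q_ra,
 *		.end		= q_end,
 *		.num_ents	= 64,
 *	};
 *
 *	sun4v_ncs_request(HV_NCS_QCONF, __pa(&qc), sizeof(qc));
 */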

#define HV_FAST_FIRE_GET_PERFREG	0x120
#define HV_FAST_FIRE_SET_PERFREG	0x121

/* Function numbers for HV_CORE_TRAP.  */
#define HV_CORE_SET_VER			0x00
#define HV_CORE_PUTCHAR			0x01
#define HV_CORE_EXIT			0x02
#define HV_CORE_GET_VER			0x03

/* Hypervisor API groups for use with HV_CORE_SET_VER and
 * HV_CORE_GET_VER.
 */
#define HV_GRP_SUN4V			0x0000
#define HV_GRP_CORE			0x0001
#define HV_GRP_INTR			0x0002
#define HV_GRP_SOFT_STATE		0x0003
#define HV_GRP_PCI			0x0100
#define HV_GRP_LDOM			0x0101
#define HV_GRP_SVC_CHAN			0x0102
#define HV_GRP_NCS			0x0103
#define HV_GRP_NIAG_PERF		0x0200
#define HV_GRP_FIRE_PERF		0x0201
#define HV_GRP_DIAG			0x0300

#ifndef __ASSEMBLY__
extern unsigned long sun4v_get_version(unsigned long group,
				       unsigned long *major,
				       unsigned long *minor);
extern unsigned long sun4v_set_version(unsigned long group,
				       unsigned long major,
				       unsigned long minor,
				       unsigned long *actual_minor);

extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
				unsigned long *minor);
extern void sun4v_hvapi_unregister(unsigned long group);
extern int sun4v_hvapi_get(unsigned long group,
			   unsigned long *major,
			   unsigned long *minor);
extern void sun4v_hvapi_init(void);
#endif
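
/* A typical negotiation sketch using the helpers declared above
 * (illustrative; on success "minor" is assumed to hold the actual minor
 * version granted by the hypervisor):
 *
 *	unsigned long minor = 0;
 *	int err;
 *
 *	err = sun4v_hvapi_register(HV_GRP_INTR, 1, &minor);
 *
 * A non-zero err indicates that the requested group/major version is
 * unavailable.
 */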

#endif /* !(_SPARC64_HYPERVISOR_H) */