File: interfaces.py


from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE=32
SALT_SIZE=16

SDMF_VERSION=0
MDMF_VERSION=1

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256  # per peer -- zfec offers at most 256 shares per file

DEFAULT_MAX_SEGMENT_SIZE = 128*1024

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests

class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
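
# Illustrative sketch (not part of the wire protocol): one way a client might
# build the 'tw_vectors' and 'r_vector' arguments for
# RIStorageServer.slot_testv_and_readv_and_writev() below. The helper name,
# share number, and offsets are made up for the example; only the tuple
# shapes are prescribed by the constraints above.
def _example_test_and_write_vectors(new_data, expected_prefix):
    # Test vector: only write if the first len(expected_prefix) bytes of
    # share 0 currently equal expected_prefix.
    testv = [(0, len(expected_prefix), "eq", expected_prefix)]
    # Data vector: overwrite the share from offset 0 with new_data.
    datav = [(0, new_data)]
    # new_length=None means "do not truncate the share".
    tw_vectors = {0: (testv, datav, None)}
    # Read vector: also fetch the first 100 bytes of every known share.
    r_vector = [(0, 100)]
    return tw_vectors, r_vector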

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket)
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)



    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """
        General-purpose test-read-and-set operation for mutable slots:
        (1) For submitted shnums, compare the test vectors against extant
            shares, or against an empty share for shnums that do not exist.
        (2) Use the read vectors to extract "old data" from extant shares.
        (3) If all tests in (1) passed, then apply the write vectors
            (possibly creating new shares).
        (4) Return whether the tests passed, and the "old data", which does
            not include any modifications made by the writes.

        The operation does not interleave with other operations on the same
        shareset.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a
        bool( (read(offset, length)) OPERATOR specimen ) and only performs
        the write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created. Write vectors
        must not overlap (if they do, this will either cause an error or
        apply them in an unspecified order). Duplicate write vectors, with
        the same offset and data, are currently tolerated but are not
        desirable.

        In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write
        vector whose offset is beyond the end of the current data, the space
        between the end of the current data and the beginning of the write
        vector will be filled with zero bytes. In earlier versions the
        contents of this space were unspecified (and might end up containing
        secrets). Storage servers with the new zero-filling behavior will
        advertise a true value for the 'fills-holes-with-zero-bytes' key
        (under 'http://allmydata.org/tahoe/protocols/storage/v1') in their
        version information.

        Each write vector is accompanied by a 'new_length' argument, which
        can be used to truncate the data. If new_length is not None and it is
        less than the current size of the data (after applying all write
        vectors), then the data will be truncated to new_length. If
        new_length==0, the share will be deleted.

        In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to
        enlarge the file by sending a number larger than the size of the data
        after applying all write vectors. That behavior was not used, and as
        of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored
        in that case.

        If a storage client knows that the server supports zero-filling, for
        example from the 'fills-holes-with-zero-bytes' key in its version
        information, it can extend the file efficiently by writing a single
        zero byte just before the new end-of-file. Otherwise it must
        explicitly write zeroes to all bytes between the old and new
        end-of-file. In any case it should avoid sending new_length larger
        than the size of the data after applying all write vectors.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same read vector is used
        for all shares. This captures the state that was tested by the test
        vector, for extant shares.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

            for k in sorted(dict.keys()):
                assert re.match(r'^[a-zA-Z_\-]+$', k)
                write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation completes
        """

class IStorageBucketReader(Interface):

    def get_block_data(blocknum=int, blocksize=int, size=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """
        @return: list of IServer instances
        """
    def get_connected_servers():
        """
        @return: frozenset of connected IServer instances
        """
    def get_known_servers():
        """
        @return: frozenset of IServer instances
        """
    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """
    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """

    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        which describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """

class IServer(Interface):
    """I live in the client, and represent a single server."""
    def start_connecting(tub, trigger_cb):
        pass
    def get_nickname():
        pass
    def get_rref():
        """Once a server is connected, I return a RemoteReference.
        Before a server is connected for the first time, I return None.

        Note that the rref I return will start producing DeadReferenceErrors
        once the connection is lost.
        """


class IMutableSlotWriter(Interface):
    """
    The interface for a writer around a mutable slot on a remote server.
    """
    def set_checkstring(checkstring, *args):
        """
        Set the checkstring that I will pass to the remote server when
        writing.

            @param checkstring: A packed checkstring to use.

        Note that implementations can differ in the semantics they support
        for set_checkstring -- they can, for example, build the checkstring
        themselves from its constituents rather than accepting it pre-packed.
        """

    def get_checkstring():
        """
        Get the checkstring that I think currently exists on the remote
        server.
        """

    def put_block(data, segnum, salt):
        """
        Add a block and salt to the share.
        """

    def put_encprivkey(encprivkey):
        """
        Add the encrypted private key to the share.
        """

    def put_blockhashes(blockhashes=list):
        """
        Add the block hash tree to the share.
        """

    def put_sharehashes(sharehashes=dict):
        """
        Add the share hash chain to the share.
        """

    def get_signable():
        """
        Return the part of the share that needs to be signed.
        """

    def put_signature(signature):
        """
        Add the signature to the share.
        """

    def put_verification_key(verification_key):
        """
        Add the verification key to the share.
        """

    def finish_publishing():
        """
        Do anything necessary to finish writing the share to a remote
        server. I require that no further publishing needs to take place
        after this method has been called.
        """


class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI be used to modify the data. Return True
        if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    pass

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""


class IReadable(Interface):
    """I represent a readable object -- either an immutable file, or a
    specific version of a mutable file.
    """

    def is_readonly():
        """Return True if this reference provides mutable access to the given
        file or directory (i.e. if you can modify it), or False if not. Note
        that even if this reference is read-only, someone else may hold a
        read-write reference to it.

        For an IReadable returned by get_best_readable_version(), this will
        always return True, but for instances of subinterfaces such as
        IMutableFileVersion, it may return False."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file."""

    def get_storage_index():
        """Return the storage index of the file."""

    def get_size():
        """Return the length (in bytes) of this readable object."""

    def download_to_data():
        """Download all of the file contents. I return a Deferred that fires
        with the contents as a byte string."""

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
          (if streaming == False)::
           consumer does p.resumeProducing()
            consumer.write(data)
           consumer does p.resumeProducing()
            consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
        """


class IWriteable(Interface):
    """
    I define methods that callers can use to update SDMF and MDMF
    mutable files on a Tahoe-LAFS grid.
    """
    # XXX: For the moment, we have only this. It is possible that we
    #      want to move overwrite() and modify() in here too.
    def update(data, offset):
        """
        I write the data from my data argument to the MDMF file,
        starting at offset. I continue writing data until my data
        argument is exhausted, appending data to the file as necessary.
        """
        # assert IMutableUploadable.providedBy(data)
        # to append data: offset=node.get_size_of_best_version()
        # do we want to support compacting MDMF?
        # for an MDMF file, this can be done with O(data.get_size())
        # memory. For an SDMF file, any modification takes
        # O(node.get_size_of_best_version()).


class IMutableFileVersion(IReadable):
    """I provide access to a particular version of a mutable file. The
    access is read/write if I was obtained from a filenode derived from
    a write cap, or read-only if the filenode was derived from a read cap.
    """

    def get_sequence_number():
        """Return the sequence number of this version."""

    def get_servermap():
        """Return the IMutableFileServerMap instance that was used to create
        this object.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    # TODO: Can this be overwrite instead of replace?
    def replace(new_contents):
        """Replace the contents of the mutable file, provided that no other
        node has published (or is attempting to publish, concurrently) a
        newer version of the file than this one.

        I will avoid modifying any share that is different than the version
        given by get_sequence_number(). However, if another node is writing
        to the file at the same time as me, I may manage to update some shares
        while they update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading this version,
        applying the modifier function (or bound method), then uploading
        the new version. This will succeed as long as no other node
        publishes a version between the download and the upload.
        I return a Deferred that fires (with a PublishStatus object) when
        the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_to_data(), the old contents will be from this version,
        but the modifier can use the servermap to make other decisions
        (such as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable version).
        'first_time' will be True the first time the modifier is called,
        and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """


# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """Return the URI string corresponding to the strongest cap associated
        with this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_write_uri():
        """Return the URI string that can be used by others to get write
        access to this node, if it is writeable. If this is a read-only node,
        return None."""

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files and directories)."""

    def is_readonly():
        """Return True if this reference provides mutable access to the given
        file or directory (i.e. if you can modify it), or False if not. Note
        that even if this reference is read-only, someone else may hold a
        read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

    def is_unknown():
        """Return True if this is an unknown node."""

    def is_allowed_in_immutable_directory():
        """Return True if this node is allowed as a child of a deep-immutable
        directory. This is true if either the node is of a known-immutable type,
        or it is unknown and read-only.
        """

    def raise_error():
        """Raise any error associated with this node."""

    # XXX: These may not be appropriate outside the context of an IReadable.
    def get_size():
        """Return the length (in bytes) of the data this node represents. For
        directory nodes, I return the size of the backing store. I return
        synchronously and do not consult the network, so for mutable objects,
        I will return the most recently observed size for the object, or None
        if I don't remember a size. Use get_current_size, which returns a
        Deferred, if you want more up-to-date information."""

    def get_current_size():
        """I return a Deferred that fires with the length (in bytes) of the
        data this node represents.
        """

class IFileNode(IFilesystemNode):
    """I am a node which represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""
    def get_best_readable_version():
        """Return a Deferred that fires with an IReadable for the 'best'
        available version of the file. The IReadable provides only read
        access, even if this filenode was derived from a write cap.

        For an immutable file, there is only one version. For a mutable
        file, the 'best' version is the recoverable version with the
        highest sequence number. If no uncoordinated writes have occurred,
        and if enough shares are available, then this will be the most
        recent version that has been uploaded. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def download_best_version():
        """Download the contents of the version that would be returned
        by get_best_readable_version(). This is equivalent to calling
        download_to_data() on the IReadable given by that method.

        I return a Deferred that fires with a byte string when the file
        has been fully downloaded. To support streaming download, use
        the 'read' method of IReadable. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be returned by
        get_best_readable_version().

        I return a Deferred that fires with an integer. If no version
        is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """


class IImmutableFileNode(IFileNode, IReadable):
    """I am a node representing an immutable file. Immutable files have
    only one version"""


class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.


    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.


    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """
    def get_best_mutable_version():
        """Return a Deferred that fires with an IMutableFileVersion for
        the 'best' available version of the file. The best version is
        the recoverable version with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has
        been uploaded.

        If no version is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def get_version():
        """Returns the mutable file protocol version."""

class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares"""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class DownloadStopped(Exception):
    pass

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'"""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""
    def __str__(self):
        # avoid UnicodeEncodeErrors when converting to str
        return self.__repr__()

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or directory)."""

class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The URI returned by this method is a read-only form of the dirnode
        ('1') URI returned by get_uri(). Like that URI, it can be used in
        set_uri() on a different directory ('2') to 'mount' a read-only
        reference to this directory ('1') under the other ('2'): holders of
        this URI can read the directory but cannot modify it. This URI is
        just a string, so it can be passed around through email or other
        out-of-band protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.  See the
        "About the metadata" section of webapi.txt for futher information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings.
        """

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IFilesystemNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for futher information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient."""

    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting ImmutableFileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must be
        a unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. If must_exist is True and I do not have a child by that name,
        I raise NoSuchChildError. If must_be_directory is True and the child
        is a file, or if must_be_file is True and the child is a directory,
        I raise ChildOfWrongTypeError."""

    def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        four elements:

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

           count-immutable-files: count of how many CHK files are in the set
           count-mutable-files: same, for mutable files (does not include
                                directories)
           count-literal-files: same, for LIT files
           count-files: sum of the above three

           count-directories: count of directories

           size-immutable-files: total bytes for all CHK files in the set
           size-mutable-files (TODO): same, for current version of all mutable
                                      files, does not include directories
           size-literal-files: same, for LIT files
           size-directories: size of mutable files used by directories

           largest-directory: number of bytes in the largest directory
           largest-directory-children: number of children in the largest
                                       directory
           largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument of encode() to use less CPU).
        The encoding math will
        be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_params():
        """Return the 3-tuple of data_size, required_shares, max_shares"""

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        """

    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.)

        Note: the requirement to break up your data into
        'required_shares' chunks of exactly the right length before
        calling encode() is surprising from the point of view of a user
        who doesn't know how FEC works. It feels like an
        implementation detail that has leaked outside the abstraction
        barrier. Is there a use case in which the data to be encoded
        might already be available in pre-segmented chunks, such that
        it is faster or less work to make encode() take a list rather
        than splitting a single string?

        Yes, there is: suppose you are uploading a file with K=64,
        N=128, segsize=262,144. Then each in-share will be of size
        4096. If you use this .encode() API then your code could first
        read each successive 4096-byte chunk from the file and store
        each one in a Python string and store each such Python string
        in a Python list. Then you could call .encode(), passing that
        list as "inshares". The encoder would generate the other 64
        "secondary shares" and return to you a new list containing
        references to the same 64 Python strings that you passed in
        (as the primary shares) plus references to the new 64 Python
        strings.

        (You could even imagine that your code could use readv() so
        that the operating system can arrange to get all of those
        bytes copied from the file into the Python list of Python
        strings as efficiently as possible instead of having a loop
        written in C or in Python to copy the next part of the file
        into the next string.)

        On the other hand if you instead use the .encode_proposal()
        API (above), then your code can first read in all of the
        262,144 bytes of the segment from the file into a Python
        string, then call .encode_proposal() passing the segment data
        as the "data" argument. The encoder would basically first
        split the "data" argument into a list of 64 in-shares of 4096
        bytes each, and then do the same thing that .encode() does. So
        this would result in a little bit more copying of data and a
        little bit higher of a "maximum memory usage" during the
        process, although it might or might not make a practical
        difference for our current use cases.

        Note that "inshares" is a strange name for the parameter if
        you think of the parameter as being just for feeding in data
        to the codec. It makes more sense if you think of the result
        of this encoding as being the set of shares from inshares plus
        an extra set of "secondary shares" (or "check shares"). It is
        a surprising name! If the API is going to be surprising then
        the name should be surprising. If we switch to
        encode_proposal() above then we should also switch to an
        unsurprising name.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data is
        useless by itself: the decoder needs to be told which share is which
        by providing it with both the shareid and the actual share data.

        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the expansion
        factor times the size of the input segment).
        """

        # rejected ideas:
        #
        #  returning a list of (shareidN,shareN) tuples instead of a pair of
        #  lists (shareids..,shares..). Brian thought the tuples would
        #  encourage users to keep the share and shareid together throughout
        #  later processing, Zooko pointed out that the code to iterate
        #  through two lists is not really more complicated than using a list
        #  of tuples and there's also a performance improvement
        #
        #  having 'data_size' not required to be an integral multiple of
        #  'required_shares'. Doing this would require encode() to perform
        #  padding internally, and we'd prefer to have any padding be done
        #  explicitly by the caller. Yes, it is an abstraction leak, but
        #  hopefully not an onerous one.


class ICodecDecoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set the params. They have to be exactly the same ones that were
        used for encoding."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncoder.encode(). Each share is
        required to be of the same length.  The i'th element of their_shareids
        is required to be the shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncoder.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncoder.set_params()
        call.
        """

class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the Merkle
    hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI Extension
    Block Hash, and the encoding parameters, both of which must be included
    in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """

    def set_size(size):
        """Specify the number of bytes that will be encoded. This must be
        performed before get_serialized_params() can be called.
        """
    def set_params(params):
        """Override the default encoding parameters. 'params' is a tuple of
        (k,d,n), where 'k' is the number of required shares, 'd' is the
        servers_of_happiness, and 'n' is the total number of shares that will
        be created.

        Encoding parameters can be set in three ways. 1: The Encoder class
        provides defaults (3/7/10). 2: the Encoder can be constructed with
        an 'options' dictionary, in which the
        'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
        set_params((k,d,n)) can be called.

        If you intend to use set_params(), you must call it before
        get_share_size or get_param are called.
        """

    def set_encrypted_uploadable(u):
        """Provide a source of encrypted upload data. 'u' must implement
        IEncryptedUploadable.

        When this is called, the IEncryptedUploadable will be queried for its
        length and the storage_index that should be used.

        This returns a Deferred that fires with this Encoder instance.

        This must be performed before start() can be called.
        """

    def get_param(name):
        """Return an encoding parameter, by name.

        'storage_index': return a string with the (16-byte truncated SHA-256
                         hash) storage index to which these shares should be
                         pushed.

        'share_counts': return a tuple describing how many shares are used:
                        (needed_shares, servers_of_happiness, total_shares)

        'num_segments': return an int with the number of segments that
                        will be encoded.

        'segment_size': return an int with the size of each segment.

        'block_size': return the size of the individual blocks that will
                      be delivered to a shareholder's put_block() method. By
                      knowing this, the shareholder will be able to keep all
                      blocks in a single file and still provide random access
                      when reading them. # TODO: can we avoid exposing this?

        'share_size': an int with the size of the data that will be stored
                      on each shareholder. This is the aggregate amount of data
                      that will be sent to the shareholder, summed over all
                      the put_block() calls I will ever make. It is useful to
                      determine this size before asking potential
                      shareholders whether they will grant a lease or not,
                      since their answers will depend upon how much space we
                      need. TODO: this might also include some amount of
                      overhead, like the size of all the hashes. We need to
                      decide whether this is useful or not.

        'serialized_params': a string with a concise description of the
                             codec name and its parameters. This may be passed
                             into the IUploadable to let it make sure that
                             the same file encoded with different parameters
                             will result in different storage indexes.

        Once this is called, set_size() and set_params() may not be called.
        """

    def set_shareholders(shareholders, servermap):
        """Tell the encoder where to put the encoded shares. 'shareholders'
        must be a dictionary that maps share number (an integer ranging from
        0 to n-1) to an instance that provides IStorageBucketWriter.
        'servermap' is a dictionary that maps share number (as defined above)
        to a set of peerids. This must be performed before start() can be
        called."""

    def start():
        """Begin the encode/upload process. This involves reading encrypted
        data from the IEncryptedUploadable, encoding it, uploading the shares
        to the shareholders, then sending the hash trees.

        set_encrypted_uploadable() and set_shareholders() must be called
        before this can be invoked.

        This returns a Deferred that fires with a verify cap when the upload
        process is complete. The verifycap, plus the encryption key, is
        sufficient to construct the read cap.
        """

class IDecoder(Interface):
    """I take a list of shareholders and some setup information, then
    download, validate, decode, and decrypt data from them, writing the
    results to an output file.

    I do not locate the shareholders, that is left to the IDownloader. I must
    be given a dict of RemoteReferences to storage buckets that are ready to
    send data.
    """

    def setup(outfile):
        """I take a file-like object (providing write and close) to which all
        the plaintext data will be written.

        TODO: producer/consumer. Maybe write() should return a Deferred that
        indicates when it will accept more data? But probably having the
        IDecoder be a producer is easier to glue to IConsumer pieces.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small integers)
        to RemoteReferences that provide RIBucketReader. This must be called
        before start()."""

    def start():
        """I start the download. This process involves retrieving data and
        hash chains from the shareholders, using the hashes to validate the
        data, decoding the shares into segments, decrypting the segments,
        then writing the resulting plaintext to the output file.

        I return a Deferred that will fire (with self) when the download is
        complete.
        """

class IDownloadTarget(Interface):
    # Note that if the IDownloadTarget is also an IConsumer, the downloader
    # will register itself as a producer. This allows the target to invoke
    # downloader.pauseProducing, resumeProducing, and stopProducing.
    def open(size):
        """Called before any calls to write() or close(). If an error
        occurs before any data is available, fail() may be called without
        a previous call to open().

        'size' is the length of the file being downloaded, in bytes."""

    def write(data):
        """Output some data to the target."""
    def close():
        """Inform the target that there is no more data to be written."""
    def fail(why):
        """fail() is called to indicate that the download has failed. 'why'
        is a Failure object indicating what went wrong. No further methods
        will be invoked on the IDownloadTarget after fail()."""
    def register_canceller(cb):
        """The CiphertextDownloader uses this to register a no-argument function
        that the target can call to cancel the download. Once this canceller
        is invoked, no further calls to write() or close() will be made."""
    def finish():
        """When the CiphertextDownloader is done, this finish() function will be
        called. Whatever it returns will be returned to the invoker of
        Downloader.download.
        """

class IDownloader(Interface):
    def download(uri, target):
        """Perform a CHK download, sending the data to the given target.
        'target' must provide IDownloadTarget.

        Returns a Deferred that fires (with the results of target.finish)
        when the download is finished, or errbacks if something went wrong."""

class IEncryptedUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with status
        information. The IEncryptedUploadable is responsible for setting
        key-determination progress ('chk'), size, storage_index, and
        ciphertext-fetch progress. It may delegate some of this
        responsibility to others, in particular to the IUploadable."""

    def get_size():
        """This behaves just like IUploadable.get_size()."""

    def get_all_encoding_parameters():
        """Return a Deferred that fires with a tuple of
        (k,happy,n,segment_size). The segment_size will be used as-is, and
        must match the following constraints: it must be a multiple of k, and
        it shouldn't be unreasonably larger than the file size (if
        segment_size is larger than filesize, the difference must be stored
        as padding).

        This usually passes through to the IUploadable method of the same
        name.

        The encoder strictly obeys the values returned by this method. To
        make an upload use non-default encoding parameters, you must arrange
        to control the values that this method returns.
        """

    def get_storage_index():
        """Return a Deferred that fires with a 16-byte storage index.
        """

    def read_encrypted(length, hash_only):
        """This behaves just like IUploadable.read(), but returns crypttext
        instead of plaintext. If hash_only is True, then this discards the
        data (and returns an empty list); this improves efficiency when
        resuming an interrupted upload (where we need to compute the
        plaintext hashes, but don't need the redundant encrypted data)."""

    def get_plaintext_hashtree_leaves(first, last, num_segments):
        """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
        plaintext segments, i.e. get the tagged hashes of the given segments.
        The segment size is expected to be generated by the
        IEncryptedUploadable before any plaintext is read or ciphertext
        produced, so that the segment hashes can be generated with only a
        single pass.

        This returns a Deferred which fires with a sequence of hashes, using:

         tuple(segment_hashes[first:last])

        'num_segments' is used to assert that the number of segments that the
        IEncryptedUploadable handled matches the number of segments that the
        encoder was expecting.

        This method must not be called until the final byte has been read
        from read_encrypted(). Once this method is called, read_encrypted()
        can never be called again.
        """

    def get_plaintext_hash():
        """OBSOLETE; Get the hash of the whole plaintext.

        This returns a Deferred which fires with a tagged SHA-256 hash of the
        whole plaintext, obtained from hashutil.plaintext_hash(data).
        """

    def close():
        """Just like IUploadable.close()."""

class IUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with status
        information. The IUploadable is responsible for setting
        key-determination progress ('chk')."""

    def set_default_encoding_parameters(params):
        """Set the default encoding parameters, which must be a dict mapping
        strings to ints. The meaningful keys are 'k', 'happy', 'n', and
        'max_segment_size'. These might have an influence on the final
        encoding parameters returned by get_all_encoding_parameters(), if the
        Uploadable doesn't have more specific preferences.

        This call is optional: if it is not used, the Uploadable will use
        some built-in defaults. If used, this method must be called before
        any other IUploadable methods to have any effect.
        """

    def get_size():
        """Return a Deferred that will fire with the length of the data to be
        uploaded, in bytes. This will be called before the data is actually
        used, to compute encoding parameters.
        """

    def get_all_encoding_parameters():
        """Return a Deferred that fires with a tuple of
        (k,happy,n,segment_size). The segment_size will be used as-is, and
        must match the following constraints: it must be a multiple of k, and
        it shouldn't be unreasonably larger than the file size (if
        segment_size is larger than filesize, the difference must be stored
        as padding).

        The relative values of k and n allow some IUploadables to request
        better redundancy than others (in exchange for consuming more space
        in the grid).

        Larger values of segment_size reduce hash overhead, while smaller
        values reduce memory footprint and cause data to be delivered in
        smaller pieces (which may provide a smoother and more predictable
        download experience).

        The encoder strictly obeys the values returned by this method. To
        make an upload use non-default encoding parameters, you must arrange
        to control the values that this method returns. One way to influence
        them may be to call set_default_encoding_parameters() before calling
        get_all_encoding_parameters().
        """

    def get_encryption_key():
        """Return a Deferred that fires with a 16-byte AES key. This key will
        be used to encrypt the data. The key will also be hashed to derive
        the StorageIndex.

        Uploadables which want to achieve convergence should hash their file
        contents and the serialized_encoding_parameters to form the key
        (which of course requires a full pass over the data). Uploadables can
        use the upload.ConvergentUploadMixin class to achieve this
        automatically.

        Uploadables which do not care about convergence (or do not wish to
        make multiple passes over the data) can simply return a
        strongly-random 16-byte string.

        get_encryption_key() may be called multiple times: the IUploadable is
        required to return the same value each time.
        """

    def read(length):
        """Return a Deferred that fires with a list of strings (perhaps with
        only a single element) which, when concatenated together, contain the
        next 'length' bytes of data. If EOF is near, this may provide fewer
        than 'length' bytes. The total number of bytes provided by read()
        before it signals EOF must equal the size provided by get_size().

        If the data must be acquired through multiple internal read
        operations, returning a list instead of a single string may help to
        reduce string copies. However, the length of the concatenated strings
        must equal the amount of data requested, unless EOF is encountered.
        Long reads, or short reads without EOF, are not allowed. read()
        should return the same amount of data as a local disk file read, just
        in a different shape and asynchronously.

        'length' will typically be equal to (min(get_size(),1MB)/req_shares),
        so a 10kB file means length=3kB, 100kB file means length=30kB,
        and >=1MB file means length=300kB.

        This method provides for a single full pass through the data. Later
        use cases may desire multiple passes or access to only parts of the
        data (such as a mutable file making small edits-in-place). This API
        will be expanded once those use cases are better understood.
        """

    def close():
        """The upload is finished, and whatever filehandle was in use may be
        closed."""


class IMutableUploadable(Interface):
    """
    I represent content that is due to be uploaded to a mutable filecap.
    """
    # This is somewhat simpler than the IUploadable interface above
    # because mutable files do not need to be concerned with possibly
    # generating a CHK, nor with per-file keys. It is a subset of the
    # methods in IUploadable, though, so we could just as well implement
    # the mutable uploadables as IUploadables that don't happen to use
    # those methods (with the understanding that the unused methods will
    # never be called on such objects)
    def get_size():
        """
        Returns a Deferred that fires with the size of the content held
        by the uploadable.
        """

    def read(length):
        """
        Returns a list of strings which, when concatenated, are the next
        length bytes of the file, or fewer if there are fewer bytes
        between the current location and the end of the file.
        """

    def close():
        """
        The process that used the Uploadable is finished using it, so
        the uploadable may be closed.
        """

class IUploadResults(Interface):
    """I am returned by upload() methods. I contain a number of public
    attributes which can be read to determine the results of the upload. Some
    of these are functional, some are timing information. All of these may be
    None.

     .file_size : the size of the file, in bytes
     .uri : the CHK read-cap for the file
     .ciphertext_fetched : how many bytes were fetched by the helper
     .sharemap: dict mapping share identifier to set of serverids
                   (binary strings). This indicates which servers were given
                   which shares. For immutable files, the shareid is an
                   integer (the share number, from 0 to N-1). For mutable
                   files, it is a string of the form 'seq%d-%s-sh%d',
                   containing the sequence number, the roothash, and the
                   share number.
     .servermap : dict mapping server peerid to a set of share numbers
     .timings : dict of timing information, mapping name to seconds (float)
       total : total upload time, start to finish
       storage_index : time to compute the storage index
       peer_selection : time to decide which peers will be used
       contacting_helper : initial helper query to upload/no-upload decision
       existence_check : helper pre-upload existence check
       helper_total : initial helper query to helper finished pushing
       cumulative_fetch : helper waiting for ciphertext requests
       total_fetch : helper start to last ciphertext response
       cumulative_encoding : just time spent in zfec
       cumulative_sending : just time spent waiting for storage servers
       hashes_and_close : last segment push to shareholder close
       total_encode_and_push : first encode to shareholder close

    """

class IDownloadResults(Interface):
    """I am created internally by download() methods. I contain a number of
    public attributes which contain details about the download process.::

     .file_size : the size of the file, in bytes
     .servers_used : set of server peerids that were used during download
     .server_problems : dict mapping server peerid to a problem string. Only
                        servers that had problems (bad hashes, disconnects)
                        are listed here.
     .servermap : dict mapping server peerid to a set of share numbers. Only
                  servers that had any shares are listed here.
     .timings : dict of timing information, mapping name to seconds (float)
       peer_selection : time to ask servers about shares
       servers_peer_selection : dict of peerid to DYHB-query time
       uri_extension : time to fetch a copy of the URI extension block
       hashtrees : time to fetch the hash trees
       segments : time to fetch, decode, and deliver segments
       cumulative_fetch : time spent waiting for storage servers
       cumulative_decode : just time spent in zfec
       cumulative_decrypt : just time spent in decryption
       total : total download time, start to finish
       fetch_per_server : dict of server to list of per-segment fetch times

    """

class IUploader(Interface):
    def upload(uploadable):
        """Upload the file. 'uploadable' must impement IUploadable. This
        returns a Deferred which fires with an IUploadResults instance, from
        which the URI of the file can be obtained as results.uri ."""
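
    # Hypothetical usage sketch:
    #
    #   d = uploader.upload(uploadable)
    #   d.addCallback(lambda results: results.uri)   # the CHK read-cap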

    def upload_ssk(write_capability, new_version, uploadable):
        """TODO: how should this work?"""

class ICheckable(Interface):
    def check(monitor, verify=False, add_lease=False):
        """Check up on my health, optionally repairing any problems.

        This returns a Deferred that fires with an instance that provides
        ICheckResults, or None if the object is non-distributed (i.e. LIT
        files).

        The monitor will be checked periodically to see if the operation has
        been cancelled. If so, no new queries will be sent, and the Deferred
        will fire (with an OperationCancelledError) immediately.

        Filenodes and dirnodes (which provide IFilesystemNode) are also
        checkable. Instances that represent verifier-caps will be checkable
        but not downloadable. Some objects (like LIT files) do not actually
        live in the grid, and their checkers return None (non-distributed
        files are always healthy).

        If verify=False, a relatively lightweight check will be performed: I
        will ask all servers if they have a share for me, and I will believe
        whatever they say. If there are at least N distinct shares on the
        grid, my results will indicate r.is_healthy()==True. This requires a
        roundtrip to each server, but does not transfer very much data, so
        the network bandwidth is fairly low.

        If verify=True, a more resource-intensive check will be performed:
        every share will be downloaded, and the hashes will be validated on
        every bit. I will ignore any shares that failed their hash checks. If
        there are at least N distinct valid shares on the grid, my results
        will indicate r.is_healthy()==True. This requires N/k times as much
        download bandwidth (and server disk IO) as a regular download. If a
        storage server is holding a corrupt share, or is experiencing memory
        failures during retrieval, or is malicious or buggy, then
        verification will detect the problem, but checking will not.

        If add_lease=True, I will ensure that an up-to-date lease is present
        on each share. The lease secrets will be derived from my node secret
        (in BASEDIR/private/secret), so either I will add a new lease to the
        share, or I will merely renew the lease that I already had. In a
        future version of the storage-server protocol (once Accounting has
        been implemented), there may be additional options here to define the
        kind of lease that is obtained (which account number to claim, etc).

        TODO: any problems seen during checking will be reported to the
        health-manager.furl, a centralized object which is responsible for
        figuring out why files are unhealthy so corrective action can be
        taken.
        """

    def check_and_repair(monitor, verify=False, add_lease=False):
        """Like check(), but if the file/directory is not healthy, attempt to
        repair the damage.

        Any non-healthy result will cause an immediate repair operation, to
        generate and upload new shares. After repair, the file will be as
        healthy as we can make it. Details about what sort of repair is done
        will be put in the check-and-repair results. The Deferred will not
        fire until the repair is complete.

        This returns a Deferred which fires with an instance of
        ICheckAndRepairResults."""

class IDeepCheckable(Interface):
    def start_deep_check(verify=False, add_lease=False):
        """Check upon the health of me and everything I can reach.

        This is a recursive form of check(), useable only on dirnodes.

        I return a Monitor, with results that are an IDeepCheckResults
        object.

        TODO: If any of the directories I traverse are unrecoverable, the
        Monitor will report failure. If any of the files I check upon are
        unrecoverable, those problems will be reported in the
        IDeepCheckResults as usual, and the Monitor will not report a
        failure.
        """

    def start_deep_check_and_repair(verify=False, add_lease=False):
        """Check upon the health of me and everything I can reach. Repair
        anything that isn't healthy.

        This is a recursive form of check_and_repair(), useable only on
        dirnodes.

        I return a Monitor, with results that are an
        IDeepCheckAndRepairResults object.

        TODO: If any of the directories I traverse are unrecoverable, the
        Monitor will report failure. If any of the files I check upon are
        unrecoverable, those problems will be reported in the
        IDeepCheckResults as usual, and the Monitor will not report a
        failure.
        """

class ICheckResults(Interface):
    """I contain the detailed results of a check/verify operation.
    """

    def get_storage_index():
        """Return a string with the (binary) storage index."""
    def get_storage_index_string():
        """Return a string with the (printable) abbreviated storage index."""
    def get_uri():
        """Return the (string) URI of the object that was checked."""

    def is_healthy():
        """Return a boolean, True if the file/dir is fully healthy, False if
        it is damaged in any way. Non-distributed LIT files always return
        True."""

    def is_recoverable():
        """Return a boolean, True if the file/dir can be recovered, False if
        not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
        files always return True."""

    def needs_rebalancing():
        """Return a boolean, True if the file/dir's reliability could be
        improved by moving shares to new servers. Non-distributed LIT files
        always return False."""


    def get_data():
        """Return a dictionary that describes the state of the file/dir. LIT
        files always return an empty dictionary. Normal files and directories
        return a dictionary with the following keys (note that these use
        binary strings rather than base32-encoded ones) (also note that for
        mutable files, these counts are for the 'best' version):

         count-shares-good: the number of distinct good shares that were found
         count-shares-needed: 'k', the number of shares required for recovery
         count-shares-expected: 'N', the number of total shares generated
         count-good-share-hosts: the number of distinct storage servers with
                                 good shares. If this number is less than
                                 count-shares-good, then some shares are
                                 doubled up, increasing the correlation of
                                 failures. This indicates that one or more
                                 shares should be moved to an otherwise unused
                                 server, if one is available.
         count-corrupt-shares: the number of shares with integrity failures
         list-corrupt-shares: a list of 'share locators', one for each share
                              that was found to be corrupt. Each share
                              locator is a list of (serverid, storage_index,
                              sharenum).
         count-incompatible-shares: the number of shares which are of a share
                                    format unknown to this checker
         list-incompatible-shares: a list of 'share locators', one for each
                                   share that was found to be of an unknown
                                   format. Each share locator is a list of
                                   (serverid, storage_index, sharenum).
         servers-responding: list of (binary) storage server identifiers,
                             one for each server which responded to the share
                             query (even if they said they didn't have
                             shares, and even if they said they did have
                             shares but then didn't send them when asked, or
                             dropped the connection, or returned a Failure,
                             and even if they said they did have shares and
                             sent incorrect ones when asked)
         sharemap: dict mapping share identifier to list of serverids
                   (binary strings). This indicates which servers are holding
                   which shares. For immutable files, the shareid is an
                   integer (the share number, from 0 to N-1). For mutable
                   files, it is a string of the form 'seq%d-%s-sh%d',
                   containing the sequence number, the roothash, and the
                   share number.

        The following keys are most relevant for mutable files, but immutable
        files will provide sensible values too::

         count-wrong-shares: the number of shares for versions other than the
                             'best' one (which is defined as being the
                             recoverable version with the highest sequence
                             number, then the highest roothash). These are
                             either leftover shares from an older version
                             (perhaps on a server that was offline when an
                             update occurred), shares from an unrecoverable
                             newer version, or shares from an alternate
                             current version that results from an
                             uncoordinated write collision. For a healthy
                             file, this will equal 0.

         count-recoverable-versions: the number of recoverable versions of
                                     the file. For a healthy file, this will
                                     equal 1.

         count-unrecoverable-versions: the number of unrecoverable versions
                                       of the file. For a healthy file, this
                                       will be 0.
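
        As an illustration only (the keys are those listed above; the
        surrounding code is just a sketch), a caller might use these counts
        to decide whether a file deserves attention::

         data = check_results.get_data()
         if data: # LIT files return an empty dict
             spare = data["count-shares-good"] - data["count-shares-needed"]
             needs_attention = (spare <= 0) or (data["count-corrupt-shares"] > 0)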

        """

    def get_summary():
        """Return a string with a brief (one-line) summary of the results."""

    def get_report():
        """Return a list of strings with more detailed results."""

class ICheckAndRepairResults(Interface):
    """I contain the detailed results of a check/verify/repair operation.

    The IFilesystemNode.check()/verify()/repair() methods all return
    instances that provide ICheckAndRepairResults.
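
    A usage sketch ('d' is assumed to be a Deferred that fires with an
    instance providing this interface; only the method names below are
    defined here)::

     def _got_results(cr_results):
         if cr_results.get_repair_attempted():
             return cr_results.get_post_repair_results().is_healthy()
         return cr_results.get_pre_repair_results().is_healthy()
     d.addCallback(_got_results)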
    """

    def get_storage_index():
        """Return a string with the (binary) storage index."""
    def get_storage_index_string():
        """Return a string with the (printable) abbreviated storage index."""
    def get_repair_attempted():
        """Return a boolean, True if a repair was attempted. We might not
        attempt to repair the file because it was healthy, or healthy enough
        (i.e. some shares were missing but not enough to exceed some
        threshold), or because we don't know how to repair this object."""
    def get_repair_successful():
        """Return a boolean, True if repair was attempted and the file/dir
        was fully healthy afterwards. False if no repair was attempted or if
        a repair attempt failed."""
    def get_pre_repair_results():
        """Return an ICheckResults instance that describes the state of the
        file/dir before any repair was attempted."""
    def get_post_repair_results():
        """Return an ICheckResults instance that describes the state of the
        file/dir after any repair was attempted. If no repair was attempted,
        the pre-repair and post-repair results will be identical."""


class IDeepCheckResults(Interface):
    """I contain the results of a deep-check operation.

    This is returned (as the Monitor's results) by a call to
    IDeepCheckable.start_deep_check().
    """

    def get_root_storage_index_string():
        """Return the storage index (abbreviated human-readable string) of
        the first object checked."""
    def get_counters():
        """Return a dictionary with the following keys::

             count-objects-checked: count of how many objects were checked
             count-objects-healthy: how many of those objects were completely
                                    healthy
             count-objects-unhealthy: how many were damaged in some way
             count-objects-unrecoverable: how many were unrecoverable
             count-corrupt-shares: how many shares were found to have
                                   corruption, summed over all objects
                                   examined
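
        For example (an illustrative sketch, not part of this interface),
        the fraction of checked objects that were fully healthy could be
        computed as::

         c = results.get_counters()
         healthy_fraction = (float(c["count-objects-healthy"])
                             / max(1, c["count-objects-checked"]))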
        """

    def get_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt. Both serverid and storage_index are
        binary.
        """
    def get_all_results():
        """Return a dictionary mapping pathname (a tuple of strings, ready to
        be slash-joined) to an ICheckResults instance, one for each object
        that was checked."""

    def get_results_for_storage_index(storage_index):
        """Retrive the ICheckResults instance for the given (binary)
        storage index. Raises KeyError if there are no results for that
        storage index."""

    def get_stats():
        """Return a dictionary with the same keys as
        IDirectoryNode.deep_stats()."""

class IDeepCheckAndRepairResults(Interface):
    """I contain the results of a deep-check-and-repair operation.

    This is returned (as the Monitor's results) by a call to
    IDeepCheckable.start_deep_check_and_repair().
    """

    def get_root_storage_index_string():
        """Return the storage index (abbreviated human-readable string) of
        the first object checked."""
    def get_counters():
        """Return a dictionary with the following keys::

             count-objects-checked: count of how many objects were checked
             count-objects-healthy-pre-repair: how many of those objects were
                                               completely healthy (before any
                                               repair)
             count-objects-unhealthy-pre-repair: how many were damaged in
                                                 some way
             count-objects-unrecoverable-pre-repair: how many were unrecoverable
             count-objects-healthy-post-repair: how many of those objects were
                                                completely healthy (after any
                                                repair)
             count-objects-unhealthy-post-repair: how many were damaged in
                                                  some way
             count-objects-unrecoverable-post-repair: how many were
                                                      unrecoverable
             count-repairs-attempted: repairs were attempted on this many
                                      objects. The count-repairs- keys will
                                      always be provided, however unless
                                      repair=true is present, they will all
                                      be zero.
             count-repairs-successful: how many repairs resulted in healthy
                                       objects
             count-repairs-unsuccessful: how many repairs did not result in
                                         completely healthy objects
             count-corrupt-shares-pre-repair: how many shares were found to
                                              have corruption, summed over all
                                              objects examined (before any
                                              repair)
             count-corrupt-shares-post-repair: how many shares were found to
                                               have corruption, summed over all
                                               objects examined (after any
                                               repair)
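
        For example (an illustrative sketch only), the success rate of the
        attempted repairs could be computed as::

         c = results.get_counters()
         attempted = c["count-repairs-attempted"]
         success_rate = float(c["count-repairs-successful"]) / max(1, attempted)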
        """

    def get_stats():
        """Return a dictionary with the same keys as
        IDirectoryNode.deep_stats()."""

    def get_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt before any repair was attempted. Both
        serverid and storage_index are binary.
        """
    def get_remaining_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt after any repair was completed. Both
        serverid and storage_index are binary. These are shares that need
        manual inspection and probably deletion.
        """
    def get_all_results():
        """Return a dictionary mapping pathname (a tuple of strings, ready to
        be slash-joined) to an ICheckAndRepairResults instance, one for each
        object that was checked."""

    def get_results_for_storage_index(storage_index):
        """Retrive the ICheckAndRepairResults instance for the given (binary)
        storage index. Raises KeyError if there are no results for that
        storage index."""


class IRepairable(Interface):
    def repair(check_results):
        """Attempt to repair the given object. Returns a Deferred that fires
        with an IRepairResults object.

        I must be called with an object that implements ICheckResults, as
        proof that you have actually discovered a problem with this file. I
        will use the data in the checker results to guide the repair process,
        such as which servers provided bad data and should therefore be
        avoided. The ICheckResults object is inside the
        ICheckAndRepairResults object, which is returned by the
        ICheckable.check() method::

         d = filenode.check(repair=False)
         def _got_results(check_and_repair_results):
             check_results = check_and_repair_results.get_pre_repair_results()
             return filenode.repair(check_results)
         d.addCallback(_got_results)
         return d
        """

class IRepairResults(Interface):
    """I contain the results of a repair operation."""
    def get_successful():
        """Returns a boolean: True if the repair made the file healthy, False
        if not. Repair failure generally indicates a file that has been
        damaged beyond repair."""


class IClient(Interface):
    def upload(uploadable):
        """Upload some data into a CHK, get back the UploadResults for it.
        @param uploadable: something that implements IUploadable
        @return: a Deferred that fires with the UploadResults instance.
                 To get the URI for this file, use results.uri .
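
        A usage sketch (obtaining the 'uploadable' is not shown here)::

         d = client.upload(uploadable)
         def _uploaded(results):
             return results.uri # the URI of the newly-uploaded file
         d.addCallback(_uploaded)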
        """

    def create_mutable_file(contents=""):
        """Create a new mutable file (with initial) contents, get back the
        new node instance.

        @param contents: (bytestring, callable, or None): this provides the
        initial contents of the mutable file. If 'contents' is a bytestring,
        it will be used as-is. If 'contents' is a callable, it will be
        invoked with the new MutableFileNode instance and is expected to
        return a bytestring with the initial contents of the file (the
        callable can use node.get_writekey() to decide how to encrypt the
        initial contents, e.g. for a brand new dirnode with initial
        children). contents=None is equivalent to an empty string. Passing a
        callable for contents= is more efficient than creating a mutable
        file and then setting its contents in a separate operation.

        @return: a Deferred that fires with an IMutableFileNode instance.
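
        A sketch of the callable form (the encryption helper shown is
        hypothetical, serving only to illustrate access to the writekey)::

         def _make_contents(node):
             writekey = node.get_writekey()
             return encrypt_initial_children(writekey) # hypothetical helper
         d = client.create_mutable_file(_make_contents)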
        """

    def create_dirnode(initial_children={}):
        """Create a new unattached dirnode, possibly with initial children.

        @param initial_children: dict with keys that are unicode child names,
        and values that are (childnode, metadata) tuples.

        @return: a Deferred that fires with the new IDirectoryNode instance.
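
        For example (assuming 'childnode' is an existing IFilesystemNode)::

         kids = {u"child.txt": (childnode, {})}
         d = client.create_dirnode(initial_children=kids)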
        """

    def create_node_from_uri(uri, rouri):
        """Create a new IFilesystemNode instance from the uri, synchronously.
        @param uri: a string or IURI-providing instance, or None. This could
                    be for a LiteralFileNode, a CHK file node, a mutable file
                    node, or a directory node
        @param rouri: a string or IURI-providing instance, or None. If the
                      main uri is None, I will use the rouri instead. If I
                      recognize the format of the main uri, I will ignore the
                      rouri (because it can be derived from the writecap).

        @return: an instance that provides IFilesystemNode (or more usefully
                 one of its subclasses). File-specifying URIs will result in
                 IFileNode-providing instances, like ImmutableFileNode,
                 LiteralFileNode, or MutableFileNode. Directory-specifying
                 URIs will result in IDirectoryNode-providing instances, like
                 DirectoryNode.
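
        For example (the cap strings are placeholders)::

         node = client.create_node_from_uri(writecap, None)
         ro_node = client.create_node_from_uri(None, rouri)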
        """

class INodeMaker(Interface):
    """The NodeMaker is used to create IFilesystemNode instances. It can
    accept a filecap/dircap string and return the node right away. It can
    also create new nodes (i.e. upload a file, or create a mutable file)
    asynchronously. Once you have one of these nodes, you can use other
    methods to determine whether it is a file or directory, and to download
    or modify its contents.

    The NodeMaker encapsulates all the authorities that these
    IFilesystemNodes require (like references to the StorageFarmBroker). Each
    Tahoe process will typically have a single NodeMaker, but unit tests may
    create simplified/mocked forms for testing purposes.
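
    A usage sketch (the cap string is a placeholder, and the dirnode API
    used on the result is described elsewhere in this file)::

     node = nodemaker.create_from_cap(cap_string) # returns synchronously
     if IDirectoryNode.providedBy(node):
         d = node.list() # Deferred that fires with the children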
    """
    def create_from_cap(writecap, readcap=None, **kwargs):
        """I create an IFilesystemNode from the given writecap/readcap. I can
        only provide nodes for existing file/directory objects: use my other
        methods to create new objects. I return synchronously."""

    def create_mutable_file(contents=None, keysize=None):
        """I create a new mutable file, and return a Deferred which will fire
        with the IMutableFileNode instance when it is ready. If contents= is
        provided (a bytestring), it will be used as the initial contents of
        the new file, otherwise the file will contain zero bytes. keysize= is
        for use by unit tests, to create mutable files that are smaller than
        usual."""

    def create_new_mutable_directory(initial_children={}):
        """I create a new mutable directory, and return a Deferred which will
        fire with the IDirectoryNode instance when it is ready. If
        initial_children= is provided (a dict mapping unicode child name to
        (childnode, metadata_dict) tuples), the directory will be populated
        with those children, otherwise it will be empty."""

class IClientStatus(Interface):
    def list_all_uploads():
        """Return a list of uploader objects, one for each upload which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""
    def list_active_uploads():
        """Return a list of active IUploadStatus objects."""
    def list_recent_uploads():
        """Return a list of IUploadStatus objects for the most recently
        started uploads."""

    def list_all_downloads():
        """Return a list of downloader objects, one for each download which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""
    def list_active_downloads():
        """Return a list of active IDownloadStatus objects."""
    def list_recent_downloads():
        """Return a list of IDownloadStatus objects for the most recently
        started downloads."""

class IUploadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""
    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        upload. Returns None if the storage index has not yet been
        calculated."""
    def get_size():
        """Return an integer with the number of bytes that will eventually
        be uploaded for this file. Returns None if the size is not yet known.
        """
    def using_helper():
        """Return True if this upload is using a Helper, False if not."""
    def get_status():
        """Return a string describing the current state of the upload
        process."""
    def get_progress():
        """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
        each from 0.0 to 1.0 . 'chk' describes how much progress has been
        made towards hashing the file to determine a CHK encryption key: if
        non-convergent encryption is in use, this will be trivial, otherwise
        the whole file must be hashed. 'ciphertext' describes how much of the
        ciphertext has been pushed to the helper, and is '1.0' for non-helper
        uploads. 'encode_and_push' describes how much of the encode-and-push
        process has finished: for helper uploads this is dependent upon the
        helper providing progress reports. It might be reasonable to add all
        three numbers and report the sum to the user."""
    def get_active():
        """Return True if the upload is currently active, False if not."""
    def get_results():
        """Return an instance of UploadResults (which contains timing and
        sharemap information). Might return None if the upload is not yet
        finished."""
    def get_counter():
        """Each upload status gets a unique number: this method returns that
        number. This provides a handle to this particular upload, so a web
        page can generate a suitable hyperlink."""

class IDownloadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""
    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files)."""
    def get_size():
        """Return an integer with the number of bytes that will eventually be
        retrieved for this file. Returns None if the size is not yet known.
        """
    def using_helper():
        """Return True if this download is using a Helper, False if not."""
    def get_status():
        """Return a string describing the current state of the download
        process."""
    def get_progress():
        """Returns a float (from 0.0 to 1.0) describing the amount of the
        download that has completed. This value will remain at 0.0 until the
        first byte of plaintext is pushed to the download target."""
    def get_active():
        """Return True if the download is currently active, False if not."""
    def get_counter():
        """Each download status gets a unique number: this method returns
        that number. This provides a handle to this particular download, so a
        web page can generate a suitable hyperlink."""

class IServermapUpdaterStatus(Interface):
    pass
class IPublishStatus(Interface):
    pass
class IRetrieveStatus(Interface):
    pass

class NotCapableError(Exception):
    """You have tried to write to a read-only node."""

class BadWriteEnablerError(Exception):
    pass

class RIControlClient(RemoteInterface):

    def wait_for_client_connections(num_clients=int):
        """Do not return until we have connections to at least NUM_CLIENTS
        storage servers.
        """

    def upload_from_file_to_uri(filename=str,
                                convergence=ChoiceOf(None,
                                                     StringConstraint(2**20))):
        """Upload a file to the grid. This accepts a filename (which must be
        absolute) that points to a file on the node's local disk. The node will
        read the contents of this file, upload it to the grid, then return the
        URI at which it was uploaded.  If convergence is None then a random
        encryption key will be used, else the plaintext will be hashed, then
        that hash will be mixed together with the "convergence" string to form
        the encryption key.
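
        A calling sketch over foolscap (the remote reference 'rref' and the
        filename are placeholders)::

         d = rref.callRemote("upload_from_file_to_uri",
                             filename="/tmp/example.txt",
                             convergence=None) # None => random encryption key
         # d fires with the URI string under which the file was stored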
        """
        return URI

    def download_from_uri_to_file(uri=URI, filename=str):
        """Download a file from the grid, placing it on the node's local disk
        at the given filename (which must be absolute[?]). Returns the
        absolute filename where the file was written."""
        return str

    # debug stuff

    def get_memory_usage():
        """Return a dict describes the amount of memory currently in use. The
        keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
        measuring memory consupmtion in bytes."""
        return DictOf(str, int)

    def speed_test(count=int, size=int, mutable=Any()):
        """Write 'count' tempfiles to disk, all of the given size. Measure
        how long (in seconds) it takes to upload them all to the servers.
        Then measure how long it takes to download all of them. If 'mutable'
        is 'create', time creation of mutable files. If 'mutable' is
        'upload', then time access to the same mutable file instead of
        creating one.

        Returns a tuple of (upload_time, download_time).
        """
        return (float, float)

    def measure_peer_response_time():
        """Send a short message to each connected peer, and measure the time
        it takes for them to respond to it. This is a rough measure of the
        application-level round trip time.

        @return: a dictionary mapping peerid to a float (RTT time in seconds)
        """

        return DictOf(str, float)

UploadResults = Any() #DictOf(str, str)

class RIEncryptedUploadable(RemoteInterface):
    __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"

    def get_size():
        return Offset

    def get_all_encoding_parameters():
        return (int, int, int, long)

    def read_encrypted(offset=Offset, length=ReadSize):
        return ListOf(str)

    def close():
        return None


class RICHKUploadHelper(RemoteInterface):
    __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def upload(reader=RIEncryptedUploadable):
        return UploadResults


class RIHelper(RemoteInterface):
    __remote_name__ = "RIHelper.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def upload_chk(si=StorageIndex):
        """See if a file with a given storage index needs uploading. The
        helper will ask the appropriate storage servers to see if the file
        has already been uploaded. If so, the helper will return a set of
        'upload results' that includes whatever hashes are needed to build
        the read-cap, and perhaps a truncated sharemap.

        If the file has not yet been uploaded (or if it was only partially
        uploaded), the helper will return an empty upload-results dictionary
        and also an RICHKUploadHelper object that will take care of the
        upload process. The client should call upload() on this object and
        pass it a reference to an RIEncryptedUploadable object that will
        provide ciphertext. When the upload is finished, the upload() method
        will finish and return the upload results.
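
        A client-side sketch ('helper' is a remote reference to this
        interface, and 'uploadable' is assumed to provide
        RIEncryptedUploadable)::

         d = helper.callRemote("upload_chk", storage_index)
         def _got(res):
             upload_results, upload_helper = res
             if upload_helper is None:
                 return upload_results # the file is already in the grid
             return upload_helper.callRemote("upload", uploadable)
         d.addCallback(_got)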
        """
        return (UploadResults, ChoiceOf(RICHKUploadHelper, None))


class RIStatsProvider(RemoteInterface):
    __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
    """
    Provides access to statistics and monitoring information.
    """

    def get_stats():
        """
        Return a dictionary containing 'counters' and 'stats', each a
        dictionary with string counter/stat name keys, and numeric or None
        values. Counters are monotonically increasing measures of work done,
        and stats are instantaneous measures (potentially time-averaged
        internally).
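
        For illustration, the returned value has roughly this shape (the
        counter/stat names here are examples only)::

         { "counters": { "uploader.files_uploaded": 15, ... },
           "stats": { "load_monitor.avg_load": 0.23, ... } }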
        """
        return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))

class RIStatsGatherer(RemoteInterface):
    __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
    """
    Provides a monitoring service for centralised collection of stats
    """

    def provide(provider=RIStatsProvider, nickname=str):
        """
        @param provider: a stats collector instance which should be polled
                         periodically by the gatherer to collect stats.
        @param nickname: a name useful to identify the provided client
        """
        return None


class IStatsProducer(Interface):
    def get_stats():
        """
        Return a dictionary with str keys representing the names of the
        stats to be monitored, and numeric values.
        """

class RIKeyGenerator(RemoteInterface):
    __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
    """
    Provides a service offering to make RSA key pairs.
    """

    def get_rsa_key_pair(key_size=int):
        """
        @param key_size: the size of the signature key.
        @return: tuple(verifying_key, signing_key)
        """
        return TupleOf(str, str)


class FileTooLargeError(Exception):
    pass

class IValidatedThingProxy(Interface):
    def start():
        """ Acquire a thing and validate it. Return a deferred which is
        eventually fired with self if the thing is valid or errbacked if it
        can't be acquired or validated."""

class InsufficientVersionError(Exception):
    def __init__(self, needed, got):
        self.needed = needed
        self.got = got
    def __repr__(self):
        return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
                                                                self.got)

class EmptyPathnameComponentError(Exception):
    """The webapi disallows empty pathname components."""