File: tools.exp

# BEGIN_ICS_COPYRIGHT8 ****************************************
# 
# Copyright (c) 2015, Intel Corporation
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Intel Corporation nor the names of its contributors
#       may be used to endorse or promote products derived from this software
#       without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# 
# END_ICS_COPYRIGHT8   ****************************************

# [ICS VERSION STRING: unknown]

## The tools.exp expect script file contains various utility routines which can
## be used in the generation of test scripts
## It is auto-sourced into expect.

## Future enhancement - provide a spawn_id argument to the expect_ procedures?
## Current handling assumes a single spawned process or the manipulation of
## spawn_id prior to calling them.
##

## User Controlled Global variables:
## ---------------------------------
## These variables are available to the user for read/write access
## They are used (read only) internally by the tools procedures.
##
## env(TEST_STOP_FAILED)
##	none - do not stop on any failure
##	suite - abort test suite only on the failure of a test suite
##	case - abort test suite on the failure of any test case
##	any - abort test suite on the failure of any test item
##	default is none
## env(TEST_SAVE_FAILED)
##	no - do not save any temporary files in the case of a failure
##	yes - save all temporary files in the case of a failure
##	default is yes
## env(TEST_SAVE_TEMP)
##	no - do not save any temporary files between test cases
##	yes - save all temporary files for each test case
##	default is no
## env(TEST_SAVE_FIRST)
##	when saving temp files for TEST_SAVE_FAILED or TEST_SAVE_TEMP should
##	an existing save_tmp be retained in preference to the newer files
##	yes - keep existing files, reflecting 1st run or failure of test
##	no - replace existing files, reflecting last run or failure of test
##	default yes
## env(TEST_PARALLEL)
##	no - serialize all tests
##	yes - when supported by a test_suite, parallelize test cases
##	default is yes
## env(TEST_MAX_PARALLEL)
##	maximum processes to concurrently execute in parallel blocks
##	default is 20
## env(TEST_TIMEOUT_MULT)
##	multiplier for all timeouts during a test (used to account for slower hosts)
##	default is 1
## env(TEST_SHOW_CONFIG)
##	show configuration at start of test suite
##	default is yes
## env(TEST_SHOW_START)
##	output a message at start of each test case
##	default is no
## env(TEST_RESULT_DIR)
##	directory to put test.log, test.res, identify.log, save_tmp, and test_tmp
##	default is .
## env(CFG_*)
##	assorted test target/driver configuration information from config file
## env(TEST_SUITES)
## env(TEST_SKIP_SUITES)
##	A space separated list of test suite tags for test suites to run/not run.
##	If not exported all the test suites are run
##	For example:
##		export TEST_SUITES="suite1 suite2"
##	will cause only tests within suite1 and suite2 to be executed. All
##	other test suites will be completely skipped including their
##	initialization and cleanup code.
##	The test cases run within the selected test suites can be further
##	limited by using the TEST_CASES variable (see below).
##	Shell style "glob" patterns can be used
##	If a test suite matches both TEST_SUITES and TEST_SKIP_SUITES it is skipped
## env(TEST_CASES)
## env(TEST_SKIP_CASES)
##	A space separated list of tags for the test cases to run/not run.  The tags
##	can be either the tag for a test case or a test_suite_tag.test_case_tag.
##	If not exported all the test cases in the selected test suites are
##	run.
##	For example:
##		export TEST_CASES="tag1 tag2"
##	will cause test cases tag1 and tag2 to be executed within all the
##	selected test suites.
##		export TEST_CASES="suite1.tag1 suite2.tag2"
##	Will cause test case tag1 within suite1 to be executed and tag2
##	within suite2 to be executed.  Note that this will not prevent the
##	execution of initialization and cleanup code from any other test suites;
##	hence it is usually desirable, when multiple test suites are potentially
##	being run, to limit the test suites by using the TEST_SUITES variable
##	(see above).  So in the case above:
##		export TEST_SUITES="suite1 suite2"
##		export TEST_CASES="suite1.tag1 suite2.tag2"
##	will make sure that any test suites other than suite1 or suite2
##	are totally skipped.
##	Shell style "glob" patterns can be used
##	If a test case matches both TEST_CASES and TEST_SKIP_CASES it is skipped
## env(TEST_ITEMS)
## env(TEST_SKIP_ITEMS)
##	A space separated list of tags for the test items to run/not run.  The tags
##	can be either the tag for a test item or a
##	test_suite_tag.test_case_tag.test_item_tag.
##	If not exported all the test items in the selected test
##	cases in the selected test suites are run.
##	For example:
##		export TEST_ITEMS="tag1 tag2"
##	will cause test items tag1 and tag2 to be executed within all the
##	selected test cases.
##		export TEST_ITEMS="suite1.tag1.item1 suite2.tag2.item2"
##	Will cause test item item1 within test case tag1 within suite1 to be
##	executed and item2 in test case tag2 within suite2 to be executed.
##	Note that this will not prevent the execution of initialization and
##	cleanup code from any other test suites; hence it is usually desirable,
##	when multiple test suites are potentially being run, to limit the test
##	cases and suites by using the TEST_SUITES and TEST_CASES variables
##	(see above).  So in the case above:
##		export TEST_SUITES="suite1 suite2"
##		export TEST_CASES="suite1.tag1 suite2.tag2"
##		export TEST_ITEMS="suite1.tag1.item1 suite2.tag2.item2"
##	will make sure that any test suites other than suite1 or suite2
##	and any cases other than suite1.tag1 and suite2.tag2
##	are totally skipped.
##	Shell style "glob" patterns can be used
##	If a test item matches both TEST_ITEMS and TEST_SKIP_ITEMS it is skipped
## env(TEST_NOEXECUTE)
##	list of test operations/initializations to skip execution of.
##	The valid set of entries is defined by the test suite itself.
##	This can be used to selectively disable test_execute operations
##	within the test suite, typically within the initialization routines.
## env(TEST_IDENTIFY)
##	disable test execution and simply identify the tests within the suite.
##	This is not completely implemented yet.
## env(TEST_DEBUG_LEVEL)
##	Enable additional test debug output to the log
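##
##	For example (hypothetical tags and values), to run only one suite,
##	skip its slower cases and double all timeouts:
##		export TEST_SUITES="suite1"
##		export TEST_SKIP_CASES="suite1.slow*"
##		export TEST_TIMEOUT_MULT=2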
##
## Additional global variables (read only in this file,
##	 but internally set by test scripts):
## spawn_id - expect's spawn id, made global for ease of use
## spawn_out - expect's spawn output information, made global for ease of use
## interact_out - expect's interact output, made global for ease of use
## expect_out - expect's expect output, made global for ease of use
## timeout - expect's timeout value, made global for ease of use
## lines_to_show - number of lines to show from stdout of a child
##	when it exits.  Default is 10.
## expecting - current message or item being expected.  This is output in
##	the timeout handler for expect_after to aid debug of failed tests.
##	defaults to "" at start of each test case/suite.
## features(au,feature) - feature status for each AU
##
## Files Generated:
## ----------------
## Any test suite which uses these tools and the associated test automation
## makefiles (to run the tests) will automatically generate
## various files as follows:
##	test.res - summary of tests run and brief results
##	test.log - detailed summary of test
##	save_tmp/test_suite_tag/test_case_tag/*
##		- log and data files from running the given test suite and
##			test case, the test_suite_tag component will be
##			the actual test suite tag as specified in the
##			test_suite procedure call, similar for test_case_tag
##			The selection as to which cases generate such files
##			depends on the env(TEST_SAVE_FAILED) and
##			env(TEST_SAVE_TEMP) variables.  The default is to
##			only save results for failed test cases, but this can
##			be altered by the user through these variables.
##			The special test_case_tags of "suite_setup" and
##			"suite_cleanup" are used to save log files from the
##			suite_setup and suite_cleanup routines
##	test_tmp$$ - temporary files, renamed as save_tmp/... or removed
##			when test completes
##	identify.res - summary of test suites automated within the directory
##			This is generated when env(TEST_IDENTIFY)=yes
##			as opposed to actually running the test suites
## File Formats:
## -------------
## Some of these files can be machine parsed or easily grepped for various
## patterns.  In the messages below, fields bracketed in '' are replaced with
## variables from execution time.  Hence ['DATE'] in a message may be
## emitted as [10:05:39 Jul 20, 1996]
##
##   test.res
##   --------
##	The test.res file contains the following types of messages:
##	Executing 'NAME' Test Suite ('TAG') ['DATE'] ...
##		- the given test suite is being started
##	Executing 'NAME' Test Suite ('TAG') ['DATE'] ...
##	SKIPPED
##		- the given test suite is being skipped
##	Test Configuration File: 'FILENAME'
##		- the given configuration file will guide the test suite
##		of course the environment may override config file values
##	FAILURE during test suite: 'MULTI_LINE_DESCRIPTION'
##		- the given error occurred during the test suite and caused
##		it to prematurely abort.  No further messages will be provided
##		for the suite other than a multi-line description of the error
##	TEST SUITE FAILURE: 'MULTI_LINE_DESCRIPTION'
##	TEST SUITE 'NAME' TESTS ABORTED
##		- fatal test suite error
##	TEST SUITE 'NAME': 'CNT' Cases; 'CNT' PASSED; 'CNT' FAILED; 'CNT' SKIPPED
##	TEST SUITE 'NAME': 'CNT' Cases; 'CNT' PASSED; 'CNT' FAILED
##	TEST SUITE 'NAME': 'CNT' Cases; 'CNT' PASSED; 'CNT' SKIPPED
##	TEST SUITE 'NAME': 'CNT' Cases; 'CNT' PASSED
##		- cumulative results for test suite
##	TEST SUITE 'NAME' PASSED
##	TEST SUITE 'NAME' FAILED
##		- overall results of suite (only passes if all cases passed
##		excluding those skipped)
##	Done 'NAME' Test Suite ['DATE']
##		- test suite completed
##	TEST SUITE 'NAME' CASE ('FULL_TAG') 'CASE_NAME': PASSED
##	TEST SUITE 'NAME' CASE ('FULL_TAG') 'CASE_NAME': FAILED
##	TEST SUITE 'NAME' CASE ('FULL_TAG') 'CASE_NAME': SKIPPED
##		- test case results.  Note in some cases the PASSED/FAILED
##		may not appear on the same line as the TEST CASE name.
##		This can be due to configuration errors causing setup errors
##		or because the test case includes test items.  For the 
##		passing case, an additional line will be logged.
##	TEST CASE FAILURE='MULTI_LINE_ERROR'
##		test case failed for given reason
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' PASSED; 'CNT' FAILED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' PASSED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' FAILED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' PASSED; 'CNT' FAILED; 'CNT' SKIPPED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' PASSED; 'CNT' SKIPPED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' FAILED; 'CNT' SKIPPED
##	TEST CASE 'NAME': 'CNT' Items; 'CNT' STARTED; 'CNT' SKIPPED
##		- cumulative results for test case which had test_items
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME': PASSED
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME' PASSED
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME': FAILED
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME' FAILED
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME': SKIPPED
##	TEST SUITE 'NAME' ITEM ('FULL_TAG') 'ITEM_NAME': STARTED
##		- test case item results.  Note in some cases the PASSED/FAILED
##		may not appear on the same line as the TEST CASE name.
##		This can be due to configuration errors causing setup errors
##		or because the test case includes test items.
##		For the STARTED case, a subsequent PASSED or FAILED line will
##		also be logged.
##	PERF performance data
##		Performance data reported by a test which should be part of the
##		performance section of the final report
##	additional messages could appear in the log
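##	For example, failing suites and cases can be located with a simple
##	grep of the result file:
##		grep 'FAILED' test.res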

##
##   identify.res
##   ------------
##
##  ***************************
##	Directory: 'DIRECTORY_NAME'
##		- the given subdirectory exists which contains additional
##		test suites (and an identify.res file)
##		the details of those test suites follow this marker
##	==============
##		- precedes data for a new test suite
##	Test Suite: 'NAME' ('TAG')
##	'MULTI_LINE_INFORMATION'
##		- test suite name, tag and detailed (multi-line) description
##
##		configuration variable settings from configuration file
##		any messages from test suite setup routines, possibly
##		failures
##		- precedes data for a new test case
##		- preceeds data for a new test case
##	Test Case: 'NAME' ('FULL_TAG')
##	'MULTI_LINE_INFORMATION'
##		- test suite and test case name, tag and detailed
##			(multi-line) description.  The description is for the test
##			case only; the description from the test suite (above) may
##			provide additional information
##
##
## Internal Global variables:
## --------------------------
## These variables are used internally by the tools procedures.
## they may be read by the user, but not modified.
##
## tools_suite_name - name of current test suite (user should not change)
## tools_suite_tag - tag of current test suite (user should not change)
## tools_suite_result - PASSED/FAILED overall result for test suite
## tools_suite_status - status of most recent (current if still in progress)
##		test_suite: "okay" if normal, "skip" if test cases should be
##		skipped, "fail" if failed, and "abort" if the suite was programmatically aborted.
## tools_suite_count - on going count of the number of test suites run
## tools_case_name - name of current test case (user should not change)
## tools_case_tag - tag of current test case (user should not change)
## tools_case_count - on going count of the number of test cases run
## tools_case_passed - on going count of the number of test cases that passed
## tools_case_failed - on going count of the number of test cases that failed
## tools_case_skipped - on going count of the number of test cases skipped
## tools_case_async - flag if test case involves asynchronous test items
## tools_case_status - status of most recent (current if still in progress)
##		test_case: "okay" if normal, "skip" if test items should be
##		skipped, "fail" if failed, and "abort" if the case was programmatically aborted.
## tools_item_name - name of current test item (user should not change)
## tools_item_tag - tag of current test item (user should not change)
## tools_item_count - on going count of the number of test items run
## tools_item_passed - on going count of the number of test items that passed
## tools_item_started - on going count of the number of test items that started
## tools_item_failed - on going count of the number of test items that failed
## tools_item_skipped - on going count of the number of test items skipped
## tools_item_*_list - lists of test item information for async test cases
## test_tmp - directory for temporary files, renamed as save_tmp/... or removed
##			when test completes
##
## Tcl Library Paths
## -----------------
##	When test_expect is run, the Tcl library search path is automatically
##	appended to include $BUILD_DIR/tcl_libs, $RELEASE_DIR/tcl_libs
##	and the current working directory.  As needed you can create libraries
##	of routines for use by your various test suites and build the
##	corresponding tclIndex to allow those libraries to be used without
##	needing to explicitly source the files.
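##
##	For example, a tclIndex for a library directory can be generated with
##	Tcl's standard auto_mkindex command (the pattern shown is illustrative):
##		echo {auto_mkindex . *.tcl} | tclsh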

# defaults
set default_TEST_STOP_FAILED none
set default_TEST_SAVE_FAILED yes
set default_TEST_SAVE_TEMP no
set default_TEST_SAVE_FIRST yes
set default_TEST_PARALLEL yes
set default_TEST_MAX_PARALLEL 20
set default_TEST_TIMEOUT_MULT 1
set default_TEST_IDENTIFY no
set default_TEST_SHOW_CONFIG yes
set default_TEST_SHOW_START no
set default_TEST_RESULT_DIR "."
set default_TEST_SERIALIZE_OUTPUT no

set tools_suite_count 0
set tools_suite_failed 0
set log_disable 0

# if we redirect stdout as part of serializing parallel output, we save
# the original stdout here so we can still output some interactive messages
set orig_stdout ""

proc test_suite { tag name information startup cleanup code } {
##
## test_suite
## -----------
## utility procedure to execute a test suite
##
## Usage:
##	test_suite tag name information startup cleanup {
##		code for test suite (typically calls to test_case)
##	}
## Arguments:
##	tag - short tag for test suite, no spaces or special characters allowed
##		used to name directory to save results
##	name - name of test suite
##	information - brief description of test suite
##	startup - procedure to execute at start of the test suite
##	cleanup - procedure to execute at the end of the test suite
##	code = Tcl commands for test suite (typically a list with 1 or
##		more invocations of test_case)
## Returns:
##	none
## Additional Information:
##	Any test_case calls must be made within the code of this
##	control structure
##
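##
##	Example (a hypothetical sketch; the demo_* procs are illustrative):
##		proc demo_setup {} { log_message "suite setup" }
##		proc demo_cleanup { failed } { log_message "suite cleanup" }
##		test_suite demo "Demo Suite" "illustrates test_suite" \
##			demo_setup demo_cleanup {
##			test_case hello "hello" "trivial passing case" {} {} {
##				log_message "hello"
##			}
##		}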

	global tools_suite_name
	global tools_suite_tag
	global tools_suite_result
	global tools_suite_status
	global tools_suite_count
	global tools_case_count
	global tools_case_passed
	global tools_case_failed
	global tools_case_skipped
	global env
	global spawn_id
	global spawn_out
	global interact_out
	global expect_out
	global timeout
	global expecting
	global features

	set tools_suite_name $name
	set tools_suite_tag $tag

	if { ! [ identify_mode ] } {
		# create test_tmp for use by startup code
		make_test_tmp
		log_redirect
		log_message "Start $name Test Suite ($tag) [date_stamp]"
		log_message "=============================================================================="
		puts "Executing $name Test Suite ($tag) [date_stamp] ..."

		log_message "$information\n"
		log_message "[date_stamp]"
		log_message ""
	}

	set tools_suite_result "PASSED"
	set tools_suite_status "okay"

	clear_case_counters

	incr tools_suite_count

	set spawn_id "";	# create variable
	set expecting "";	# create variable
	set_timeout 60;		# create and initialize variable

	# test for test_suite_tag in TEST_SUITES/TEST_SKIP_SUITES
	if { [ check_skip_test TEST_SUITES TEST_SKIP_SUITES "{$tag}" ] } {
		show_message "SKIPPED"
		log_message ""
		remove_test_tmp
		return
	}

	if { [ info exists env(TEST_CONFIG_FILE) ] != 1 } {
		remove_test_tmp
		fail_suite "FAILURE during test suite: Environment variable TEST_CONFIG_FILE not set" 1
	}
	if { [ catch { read_config $env(TEST_CONFIG_FILE) } res ] != 0 } {
		remove_test_tmp
		fail_suite "FAILURE during test suite: failed to read $env(TEST_CONFIG_FILE) $res" 1
	}

	# test for test_suite_tag in TEST_SUITES/TEST_SKIP_SUITES
	if { [ check_skip_test TEST_SUITES TEST_SKIP_SUITES "{$tag}" ] } {
		show_message "TEST SUITE $tools_suite_name ($tag) TESTS SKIPPED"
		log_message ""
		remove_test_tmp
		return
	}

	# only identify suites which are not skipped
	if { [ identify_mode ] } {
		log_file -a [test_get_env TEST_RESULT_DIR]/identify.log
		puts "=============================================================================="
		puts "Test Suite: $name ($tag)"
		puts "$information\n"
	}

	test_execute {} {
		setup_expect_after
	}
	set res [ catch {
		set expecting ""
		set_timeout 60
		if { [ catch { eval $startup } err_str2 ] != 0 } {
			cleanup_all_children
			test_execute {} {
				log_restore
				save_test_tmp $tools_suite_tag suite_setup TEST_SAVE_FAILED
			}
			error "suite_setup failed: $err_str2"
		} elseif { "$err_str2" == -1 } {
			# skip test_suite and do not run cleanup proc
			show_message "TEST SUITE $tools_suite_name ($tag) TESTS SKIPPED"
			set tools_suite_status "skipped"
			log_message ""
			set cleanup "noop"
			set tools_suite_result "SKIPPED"
			test_execute {} {
				log_restore
				save_test_tmp $tools_suite_tag suite_setup TEST_SAVE_TEMP
			}
			remove_test_tmp
		} else {
			test_execute {} {
				log_restore
				save_test_tmp $tools_suite_tag suite_setup TEST_SAVE_TEMP
			}

			set expecting ""
			set_timeout 60
			# code is run even in identify mode so that test cases
			# can identify themselves
			eval $code
		}
	} err_str ] 
	if { $res != 0 } {
		if { $tools_suite_status == "okay" } {
			fail_suite "FAILURE during test suite: $err_str" 0
		}
	}

	set expecting ""
	set_timeout 60
	if { "$cleanup" != "" && "$cleanup" != "noop" } {
		test_execute {} {
			make_test_tmp
			log_redirect
		}
		if { [ catch { eval $cleanup [expr { "$tools_suite_result" == "FAILED" }] } err_str2 ] != 0 } {
			cleanup_all_children
			test_execute {} {
				log_restore
				save_test_tmp $tools_suite_tag suite_cleanup TEST_SAVE_FAILED
			}
			if { $tools_suite_status == "okay" } {
				fail_suite "FAILURE during test suite $cleanup function: $err_str2" 0
			}
		} else {
			test_execute {} {
				log_restore
				save_test_tmp $tools_suite_tag suite_cleanup TEST_SAVE_TEMP
			}
		}
	}
	cleanup_all_children
	set message "TEST SUITE $tools_suite_name: $tools_case_count Cases; $tools_case_passed PASSED"
	if { $tools_case_failed != 0 } {
		append message "; $tools_case_failed FAILED"
	}
	if { $tools_case_skipped != 0 } {
		append message "; $tools_case_skipped SKIPPED"
	}
	show_message "$message"
	show_message "TEST SUITE $tools_suite_name $tools_suite_result"
	show_message "Done $tools_suite_name Test Suite [date_stamp]\n"
	if { [ identify_mode ] } {
		puts ""
	}
	remove_test_tmp
	clear_case_counters

	return
}

proc log_redirect {} {
	global test_tmp

	# redirect the log for a test case
	log_file
	log_file -a $test_tmp/test.log
}

proc log_restore {} {
	# restore the logging and append the test case's log to the
	# main log
	global test_tmp
	set test_log "[test_get_env TEST_RESULT_DIR]/test.log"

	log_file
	# RHEL5, SLES10 and newer distros have flock which can correct a race here
	# on older distros we could use tclX's flock command, but not worth the
	# trouble to fix for RHEL4 at this time
	if { [catch { exec which flock > /dev/null 2>/dev/null } ] == 0 } {
		exec flock $test_log tee -a $test_log < $test_tmp/test.log > /dev/null
	} else {
		exec cat $test_tmp/test.log >> $test_log
	}
	log_file -a $test_log
}

set punchlist_fd ""
set start_time ""

proc open_punchlist { fname } {
	global start_time punchlist_fd

	if { "$start_time" == "" } {
		set start_time [ timestamp -format "%Y/%m/%d %H:%M:%S" ]
	}
	if { "$punchlist_fd" == "" } {
		set punchlist_fd [open "$fname" {WRONLY APPEND CREAT} ]
		fconfigure $punchlist_fd -buffering line
	}
}

proc close_punchlist {} {
	global start_time punchlist_fd

	if { "$punchlist_fd" != "" } {
		close $punchlist_fd
		set punchlist_fd ""
	}
}

# open, append and close punchlist
# any errors are logged but will not cause exceptions
proc append_punchlist { device issue } {
	global start_time punchlist_fd

	set fname "[test_get_env TEST_RESULT_DIR]/punchlist.csv"
	if { [ catch { open_punchlist "$fname" } err_str ] != 0 } {
		log_message "\nERROR: Unable to open punchlist: $fname: $err_str"
	} else {
		if { [ catch { puts $punchlist_fd "$start_time;$device;$issue" } err_str ] != 0 } {
			log_message "\nERROR: Unable to write punchlist: $fname: $err_str"
		}
	}
	catch { close_punchlist }
}
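
# Example (hypothetical device and issue text):
#	append_punchlist "node001 hfi1_0" "link width downgraded"
# appends a line of the form "START_TIME;node001 hfi1_0;link width downgraded"
# to TEST_RESULT_DIR/punchlist.csv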

proc setup_expect_after {} {
	global spawn_id

	# set up so all subsequent expects fail on unexpected conditions
	# Note that since expect_after is used, eof or timeout within
	# specific
	# test invocations (such as expect_eof) will be evaluated before
	# those here
	# spawn_id is global and indirect so that as a process is
	# spawned, the expect_after will apply to it automatically
	expect_after {
		-i spawn_id eof	{
					global expecting
					if { "$expecting" != "" } {
						log_message "\nERROR: EOF waiting for: $expecting"
					}
					ignore_rest
					fail_test "eof"
					}
			-i spawn_id timeout	{ 
					global spawn_id expecting

					if { "$expecting" != "" } {
						log_message "\nERROR: timeout ($timeout) waiting for: $expecting"
					}
					# get buffered data
					expect *
					if { "$expect_out(buffer)" != "" } {
						log_message "\nERROR: Received data: $expect_out(buffer)"
						fail_test "timeout: with data"
					} else {
						fail_test "timeout: No data"
					}
				}
	}
}
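
# Example: tests typically set the global "expecting" variable before an
# expect so the handlers above can report a useful error on eof or timeout
# (the prompt pattern is illustrative):
#	set expecting "shell prompt"
#	expect -re {\$ $}
#	set expecting ""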

proc disable_expect_after {} {
	expect_after
}

proc test_case { tag name information startup cleanup code { failure_code "" }} {
##
## test_case
## ---------
## This executes the given test case
##
## Usage:
##	test_case tag "test name" "test description" startup cleanup {
##		code for test
##	}
## Arguments:
##	tag - short tag for test case, no spaces or special characters allowed
##		used to name directory to save results
##	name = test case name
##	information = brief description of test
##	code = Tcl commands for test case (typically a list)
##	startup - procedure to execute at start of the test case
##	cleanup - procedure to execute at the end of the test case
## Returns:
##	0 - test success or explicitly skipped or identify mode
##	1 - test failure or skipped due to earlier fail_suite
## Additional Information:
##	In general the code should avoid doing output such that the
##	output will simply include test cases and PASS/FAIL status
##	Any error returns within $code will indicate a test failure.
##	In general, fail_test should be invoked to indicate such failures.
##	Any detailed logging of test progress should be done using log_message
##
##	This routine always removes/mkdir's test_tmp at the start of each
##	test case and removes it at the end of each test case.
##
##	The failure_code is run before saving test_tmp (if TEST_SAVE_FAILED exported)
##	and prior to running any cleanup code (defined for test_suite)
##	Its typical use is to extract additional information into test_tmp
##	to aid debugging of why the test case failed
##	The log during the test case is kept in test_tmp/test.log (and
##	saved based on TEST_SAVE_FAILED) and at the end of the test case it is
##	appended to the main test.log
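##
##	Example (a hypothetical sketch; the failure_code here copies extra
##	debug information into test_tmp only when the case fails):
##		test_case ping "ping test" "verify target answers ping" {} {} {
##			log_message "pinging target"
##		} {
##			catch { exec cp /var/log/messages $::test_tmp }
##		}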

	global tools_suite_name
	global tools_suite_tag
	global tools_suite_result
	global tools_suite_count
	global tools_suite_status
	global tools_case_name
	global tools_case_tag
	global tools_case_count
	global tools_case_passed
	global tools_case_failed
	global tools_case_skipped
	global tools_case_status
	global tools_item_name
	global tools_item_tag
	global tools_item_count
	global tools_item_passed
	global tools_item_started
	global tools_item_failed
	global tools_item_skipped
	global tools_item_status
	global tools_case_async
	global tools_item_tag_list
	global tools_item_full_tag_list
	global tools_item_name_list
	global tools_item_status_list
	global orig_stdout

	global env
	# force spawn_id returned by any spawn's within $code to be global
	global spawn_id
	global spawn_out
	global interact_out
	global expect_out
	global timeout
	global expecting
	global features

	set tools_case_status "okay"
	incr tools_case_count

	clear_item_counters
	set tools_case_async 0
	set tools_item_tag_list {}
	set tools_item_full_tag_list {}
	set tools_item_name_list {}
	set tools_item_status_list {}

	set full_tag "$tools_suite_tag\.$tag"
	set tools_case_name "$name"
	set tools_case_tag "$full_tag"

	if { ! [ identify_mode ] } {
		make_test_tmp

		log_redirect
		if { [ show_start ] } {
			if { $orig_stdout != "" } {
				puts $orig_stdout "Executing TEST SUITE $tools_suite_name CASE ($full_tag) $name ... "
			} else {
				puts "Executing TEST SUITE $tools_suite_name CASE ($full_tag) $name ... "
			}
		}
		puts -nonewline "TEST SUITE $tools_suite_name CASE ($full_tag) $name "
		log_message "TEST SUITE $tools_suite_name CASE ($full_tag) $name:"
		log_message "$information\n"
		log_message "[date_stamp]"
		log_message ""
	}

	# test for tag or test_suite_tag.test_case_tag in TEST_CASES/TEST_SKIP_CASES
	if { $tools_suite_status == "skip"
	     || [ check_skip_test TEST_CASES TEST_SKIP_CASES "{$tag} {$full_tag}" ] } {
		test_execute {} {
			show_message "SKIPPED"
			log_message ""
			log_restore
		}
		set tools_case_status "skip"
		incr tools_case_skipped
		remove_test_tmp
		if { $tools_suite_status == "skip" } {
			return 1
		} else {
			return 0
		}
	}

	# only identify cases which are not skipped
	if { [ identify_mode ] } {
		puts "------------------------------------------------------------------------------"
		# puts "Test Suite: $tools_suite_name ($tools_suite_tag)"
		puts "Test Case: $name ($full_tag)"
		puts "$information\n"
	}

	set res [ catch {
		set expecting ""
		set_timeout 60
		if { [ catch { eval $startup } err_str2 ] != 0 } {
			error "case_setup failed: $err_str2"
		} elseif { "$err_str2" == -1 } {
			# skip test_case and do not run cleanup proc
			set tools_case_status "skip"
			set res 1
		} else {
			set expecting ""
			set_timeout 60
			# the handling of TEST_IDENTIFY for test_items within
			# a test_case is TBD; at this time, they are ignored.
			test_execute {} { eval $code }
		}
	} err_str ] 

	set did_cleanup 0
	if { $res != 0 && $tools_case_status == "skip" } {
		# clear result, we will run cleanup for skipped
		set res 0
	}
	if { $res == 0 } {
		set expecting ""
		set_timeout 60
		if { "$cleanup" != "" && "$cleanup" != "noop"
		     && [ catch { eval $cleanup 0 } err_str2 ] != 0 } {
			set err_str "$cleanup function FAILED: $err_str2"
			set res 1
		}
		set did_cleanup 1
	}
	if { $res == 0 && $tools_item_failed != 0 } {
		set err_str "$tools_item_failed test items FAILED"
		set res 1
	}
	if { $res != 0 && ! [ identify_mode ] } {
		# output message immediately after failure to make reading
		# log easier, also output immediately before FAILED message
		log_message "\nTEST CASE FAILURE=$err_str"
		if { ! $did_cleanup && "$failure_code" != "" } {
			set expecting ""
			set_timeout 60
			if { [ catch { eval $failure_code } err_str2 ] != 0 } {
				log_message "failure handling code FAILED: $err_str2"
			}
		}
		set expecting ""
		set_timeout 60
		if { ! $did_cleanup && "$cleanup" != "" && "$cleanup" != "noop"
		     && [ catch { eval $cleanup 1 } err_str2 ] != 0 } {
			log_message "$cleanup function FAILED: $err_str2"
		}
		cleanup_all_children
		log_message "\nFAILURE=$err_str"
		test_item_totals
		clear_item_counters
		puts "FAILED"
		log_message "\n[date_stamp]\n"
		log_message "\nTEST SUITE $tools_suite_name CASE ($full_tag) $name: FAILED\n"
		set tools_suite_result "FAILED"
		incr tools_case_failed
		set tools_case_status "fail"
		log_restore
		save_test_tmp $tools_suite_tag $tag TEST_SAVE_FAILED
		set stop_failed [test_get_env TEST_STOP_FAILED]
		if { "$stop_failed" == "case" || "$stop_failed" == "any" } {
			fail_suite "test failed with TEST_STOP_FAILED set" $stop_failed
		} else {
			return 1
		}
	} else {
		cleanup_all_children
		test_execute {} {
			test_item_totals
			if { $tools_case_status == "skip" } {
				incr tools_case_skipped
				set status "SKIPPED: $err_str"
			} elseif {$tools_item_count && $tools_item_skipped == $tools_item_count } {
				# all items were skipped, report case as skipped
				incr tools_case_skipped
				set status "SKIPPED"
			} else {
				incr tools_case_passed
				set status "PASSED"
			}
			puts "$status"
			log_message "\n[date_stamp]\n"
			log_message "\nTEST SUITE $tools_suite_name CASE ($full_tag) $name: $status\n"
			log_restore
			save_test_tmp $tools_suite_tag $tag TEST_SAVE_TEMP
		}
		clear_item_counters
		return 0
	}
}

proc test_item_totals { } {
# if there were test_items in this test_case, report the test item
# statuses and the totals
# and re-output the test_case details for the PASSED/FAILED message to come
	global tools_item_count
	global tools_item_passed
	global tools_item_started
	global tools_item_failed
	global tools_item_skipped
	global tools_case_async
	global tools_case_name
	global tools_case_tag
	global tools_suite_name
	global tools_item_status_list

	if { $tools_item_count > 0 } {
		foreach message $tools_item_status_list {
			show_message $message
		}
		if { $tools_case_async } {
			set message "TEST CASE $tools_case_name: $tools_item_count Items; $tools_item_started STARTED"
			if { $tools_item_passed != 0 } {
				append message "; $tools_item_passed PASSED"
			}
			if { $tools_item_failed != 0 } {
				append message "; $tools_item_failed FAILED"
			}
		} else {
			set message "TEST CASE $tools_case_name: $tools_item_count Items; $tools_item_passed PASSED"
			if { $tools_item_failed != 0 } {
				append message "; $tools_item_failed FAILED"
			}
		}
		if { $tools_item_skipped != 0 } {
			append message "; $tools_item_skipped SKIPPED"
		}
		show_message "$message"
		puts -nonewline "TEST SUITE $tools_suite_name CASE ($tools_case_tag) $tools_case_name "
	}
}

# initialize when first load this module
set test_tmp "test_tmp[pid]"

proc make_test_tmp { } {
# create the test_tmp directory

	global test_tmp

	# set here in case sub-process in a parallel execution set of tests
	set test_tmp "[test_get_env TEST_RESULT_DIR]/test_tmp[pid]"
	catch { exec rm -rf $test_tmp }
	exec mkdir $test_tmp
}

proc test_get_env { envvar } {
##
## test_get_env
## --------------
## get given env var value, if not exported, returns value of default_$envvar
##
## Arguments:
##	envvar - variable name to get value for
## Returns:
##	value to use for variable

	global env
	global default_$envvar

	if { [ info exists env($envvar)] } {
		return $env($envvar)
	} else {
		return [ set default_$envvar ]
	}
}
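
# Example: returns env(TEST_MAX_PARALLEL) if exported, otherwise the
# default_TEST_MAX_PARALLEL value (20):
#	set max_par [ test_get_env TEST_MAX_PARALLEL ]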

proc test_check_yn { value } {
##
## test_check_yn
## --------------
## determine if value is yes (or y) or no
##
## Arguments:
##	value - value to check
## Returns:
##	1 - yes
##	0 - no (or invalid value)

	return [ string match {[yY]*} $value ]
}
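
# Examples:
#	test_check_yn yes	;# => 1
#	test_check_yn Y		;# => 1
#	test_check_yn no	;# => 0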


proc test_save_temp { } {
##
## test_save_temp
## --------------
## return if TEST_SAVE_TEMP behaviour should be performed
##
## Returns:
##	1 - yes
##	0 - no

	return [ test_check_yn [ test_get_env TEST_SAVE_TEMP] ]
}

proc save_test_tmp { suite_tag case_tag envvar } {
# save the test_tmp directory to save_tmp/$suite_tag/$case_tag
# if $env(envvar) is yes

	global env
	global test_tmp

	set save_tmp [test_get_env TEST_RESULT_DIR]/save_tmp

	if { [ test_check_yn [ test_get_env TEST_SAVE_FIRST ] ] 
		 && [ file exists $save_tmp/$suite_tag/$case_tag ] } {
		log_message "$test_tmp not saved due to existing $save_tmp/$suite_tag/$case_tag with TEST_SAVE_FIRST enabled"
		catch { exec rm -rf $test_tmp }
		return
	}
	catch { exec rm -fr $save_tmp/$suite_tag/$case_tag }
	if { [ test_check_yn [ test_get_env $envvar ] ] } {
		log_message "$test_tmp saved to $save_tmp/$suite_tag/$case_tag"
		if { [ file exists $save_tmp ] != 1 } {
			exec mkdir -p $save_tmp
		}
		if { [ file exists $save_tmp/$suite_tag ] != 1 } {
			exec mkdir -p $save_tmp/$suite_tag
		}
		exec mv $test_tmp $save_tmp/$suite_tag/$case_tag
	} else {
		catch { exec rm -rf $test_tmp }
		# rmdir can fail if directory not empty, that's ok
		catch { exec rmdir $save_tmp/$suite_tag }
	}
}

proc remove_test_tmp { } {
# remove the test_tmp directory

	global test_tmp

	catch { exec rm -rf $test_tmp }
}

proc check_skip_test { envvar skipenvvar match_list } {
#
# check_skip_test
# -------------------
# determine if the current test should be skipped
#
# Usage:
#	check_skip_test
#
# Arguments:
#	envvar - environment variable to base tests on (TEST_SUITES, TEST_CASES
#		or TEST_ITEMS)
#	skipenvvar - environment variable to base skipping tests on
#		(TEST_SKIP_SUITES, TEST_SKIP_CASES or TEST_SKIP_ITEMS)
#	match_list - list of tags to attempt to match against each entry in
#		the environment variable
# Returns:
#	1 - this test should be skipped
#	0 - this test should be executed
# Additional Information:
#	If a tag in match_list matches both envvar and skipenvvar, 1 is returned

	global env

	set result 0

	if { [ info exists env($envvar) ] } {
		# limit to those we find in envvar
		set result 1
		foreach item $env($envvar) {
			foreach tag $match_list {
				if { [ string match $item $tag ] } {
					set result 0
				}
			}
		}
	}
	if { $result == 1 } {
		# already decided to skip, no need to test skipenvvar
		return $result
	}
	if { [ info exists env($skipenvvar) ] } {
		# skip those we find in skipenvvar
		foreach item $env($skipenvvar) {
			foreach tag $match_list {
				if { [ string match $item $tag ] } {
					set result 1
				}
			}
		}
	}
	return $result
}
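
# Example (hypothetical tags): with env(TEST_CASES) set to "suite1.*":
#	check_skip_test TEST_CASES TEST_SKIP_CASES "{tag1} {suite1.tag1}"	;# => 0 (run)
#	check_skip_test TEST_CASES TEST_SKIP_CASES "{tag2} {suite2.tag2}"	;# => 1 (skip)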

proc test_parallel { } {
##
## test_parallel
## --------------
## determine if parallel tests should be run based on TEST_PARALLEL and
## TEST_IDENTIFY
##
## Returns:
##	1 - yes
##	0 - no

#
# If this version of TCL has thread support enabled, disable
# parallel operations, because thread support causes hangs
# in TCL fork processing.
#

	global tcl_platform
	#if { [ catch { set thread_tcl $tcl_platform(threaded) } result] } {
		set thread_tcl 0
	#}

	return [ expr ! [identify_mode] \
			&& [ test_check_yn [ test_get_env TEST_PARALLEL] ] \
			&& [ test_get_env TEST_MAX_PARALLEL ] > 1 \
			&& {$thread_tcl == 0 } ]
}

proc test_item { tag name information startup cleanup code { failure_code "" }} {
##
## test_item
## ---------
## This executes the given test item
##
## Usage:
##	test_item tag "test name" "test description" startup cleanup {
##		code for test
##	}
## Arguments:
##	tag - short tag for test case, no spaces or special characters allowed
##		used to name directory to save results
##	name - test item name
##	information - brief description of test
##	startup - procedure to execute at start of the test item
##	cleanup - procedure to execute at the end of the test item
##	code - Tcl commands for test item (typically a list)
##	failure_code - optional Tcl commands run only when code fails,
##		before cleanup is run
## Returns:
##	0 - test success or explicitly skipped
##	1 - test failure or skipped due to earlier fail_suite
## Additional Information:
##	In general the code should avoid doing output such that the
##	output will simply include test items and PASS/FAIL status
##	Any error returns within $code will indicate a test failure.
##	In general, fail_test should be invoked to indicate such failures.
##	Any detailed logging of test progress should be done using log_message
##
##	failure_code's typical use is to extract additional information into
##	test_tmp to aid debugging of why the test item failed
##	The log during the test item is kept in test_tmp/test.log (and
##	saved based on TEST_SAVE_FAILED) and at the end of the test item it is
##	appended to the main test.log
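##
## Sample (a minimal sketch; the tag, name, and command are hypothetical):
##	test_item ping "ping localhost" "verify localhost answers ping" noop noop {
##		run_cmd {exec ping -c 1 localhost}
##	}
##	(noop is skipped as startup/cleanup; run_cmd fails the item on error)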

	global tools_suite_name
	global tools_suite_tag
	global tools_suite_result
	global tools_suite_count
	global tools_suite_status
	global tools_case_name
	global tools_case_tag
	global tools_name_name
	global tools_name_tag
	global tools_item_count
	global tools_item_passed
	global tools_item_started
	global tools_item_failed
	global tools_item_skipped
	global tools_item_status
	global tools_case_async
	global tools_case_status
	global tools_item_tag_list
	global tools_item_full_tag_list
	global tools_item_name_list

	global env
	# force spawn_id returned by any spawn's within $code to be global
	global spawn_id
	global spawn_out
	global interact_out
	global expect_out
	global timeout
	global expecting
	global features

	if { $tools_item_count == 0 } {
		# newline after TEST CASE name line
		puts "..."
	}

	incr tools_item_count

	set tools_item_result "PASSED"
	set tools_item_status "okay"

	set full_tag "$tools_case_tag\.$tag"
	set tools_item_name "$name"
	set tools_item_tag "$full_tag"

	puts -nonewline "TEST SUITE $tools_suite_name ITEM ($full_tag) $name "
	log_message "TEST SUITE $tools_suite_name ITEM ($full_tag) $name:"
	log_message "$information\n"
	log_message "[date_stamp]"
	log_message ""

	# test for tag or test_suite_tag.test_case_tag.test_item_tag
	# in TEST_ITEMS
	if { $tools_suite_status == "skip"
	     || [ check_skip_test TEST_ITEMS TEST_SKIP_ITEMS "{$tag} {$full_tag}" ] } {
		show_message "SKIPPED"
		log_message ""
		incr tools_item_skipped
		set tools_item_status "skip"
		if { $tools_suite_status == "skip" } {
			return 1
		} else {
			return 0
		}
	}

	set res [ catch {
		set expecting ""
		set_timeout 60
		if { [ catch { uplevel $startup } err_str2 ] != 0 } {
			error "item_setup failed: $err_str2"
		} elseif { "$err_str2" == -1 } {
			# skip test_item and do not run cleanup proc
			set tools_item_status "skip"
			set res 1
		} else {
			set expecting ""
			set_timeout 60
			uplevel $code
		}
	} err_str ]
	if { $res != 0 && ( $tools_item_status == "skip" || $tools_case_status == "skip" ) } {
		# clear result, we will run cleanup for skipped
		set res 0
	}
	set did_cleanup 0
	if { $res == 0 } {
		set expecting ""
		set_timeout 60
		if { "$cleanup" != "" && "$cleanup" != "noop"
		     && [ catch { eval $cleanup 0 } err_str2 ] != 0 } {
			set err_str "$cleanup function FAILED: $err_str2"
			set res 1
		}
		set did_cleanup 1
	}
	if { $res != 0 } {
		# output message immediately after failure to make reading
		# log easier, also output immediately before FAILED message
		log_message "\nFAILURE=$err_str"
		if { ! $did_cleanup && "$failure_code" != "" } {
			set expecting ""
			set_timeout 60
			if { [ catch { uplevel $failure_code } err_str2 ] != 0 } {
				log_message "failure handling code FAILED: $err_str2"
			}
		}
		set expecting ""
		set_timeout 60
		if { ! $did_cleanup && "$cleanup" != "" && "$cleanup" != "noop"
		     && [ catch { uplevel $cleanup 1 } err_str2 ] != 0 } {
			log_message "$cleanup function FAILED: $err_str2"
		}
		log_message "\nFAILURE=$err_str"
		puts "FAILED"
		log_message "\n[date_stamp]\n"
		log_message "\nTEST SUITE $tools_suite_name ITEM ($full_tag) $name: FAILED\n"
		set tools_suite_result "FAILED"
		incr tools_item_failed
		set tools_item_status "fail"
		set stop_failed [test_get_env TEST_STOP_FAILED]
		if { "$stop_failed" == "any" } {
			fail_suite "test failed with TEST_STOP_FAILED set" $stop_failed
		} else {
			return 1
		}
	} else {
		if { $tools_case_async } {
			set status "STARTED"
			incr tools_item_started
			lappend tools_item_tag_list "$tag"
			lappend tools_item_name_list "$name"
			lappend tools_item_full_tag_list "$full_tag"
		} else {
			if { $tools_item_status == "skip" || $tools_case_status == "skip" } {
				set status "SKIPPED: $err_str"
				incr tools_item_skipped
			} else {
				set status "PASSED"
				incr tools_item_passed
			}
		}
		puts "$status"
		log_message "\n[date_stamp]\n"
		log_message "\nTEST SUITE $tools_suite_name ITEM ($full_tag) $name: $status\n"
		if { $tools_case_status == "skip" } {
			# pass exception up so rest of test case is skipped
			error "$err_str" "$err_str"
		}
		return 0
	}
}

proc async_case {} {
##
## async_case
## ----------
## Indicate that the test_items in the current test case run asynchronously
##
## Usage:
##	async_case
## Additional Information:
##	This causes test_item to report FAILED (and update the counts) or
##	STARTED.  It is expected that later code in the test_case
##	(typically the cleanup routine) will invoke the test_item_status
##	routine to indicate the pass/fail status for each test item
##	for inclusion in the test.res report.
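##
## Sample (a minimal sketch; the tag and job command are hypothetical):
##	async_case
##	test_item bg1 "bg job" "start a background job" noop noop {
##		run_cmd {exec ./start_job.sh &}
##	}
##	# ... later, typically in the test case cleanup routine:
##	test_item_status { bg1 PASSED }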
	global tools_case_async

	set tools_case_async 1
	log_message "Asynchronously run test items for this test case"
}

# initialize value when source is first autosourced in
global tools_case_async
set tools_case_async 0

proc get_async_case {} {
##
## get_async_case
## ----------
## return current value for async_case as set via async_case call
##
## Usage:
##	get_async_case

	global tools_case_async

	return $tools_case_async 
}

proc test_item_status { item_info } {
##
## test_item_status
## ----------------
## Report the status for a test item in an asynchronously executed test case
##
## Usage:
##	test_item_status item_info
## Arguments:
##	item_info - a two element list: test_item_tag status
##		test_item_tag - tag provided to test_item call
##		status - status of test item (PASSED/FAILED/SKIPPED)
## Additional Information:
##	test_item_tags not found in the tools_item_tag_list are assumed to
##		have already been reported on.  This list maintains the
##		set of test_items which have been successfully started
##		but have not yet had their status reported
##

	global tools_item_passed tools_item_failed tools_item_skipped
	global tools_item_tag_list
	global tools_item_full_tag_list
	global tools_item_name_list tools_suite_name
	global tools_item_status_list

	set test_item_tag [ lindex $item_info 0 ]
	set status [ lindex $item_info 1 ]

	set index [ lsearch -exact $tools_item_tag_list $test_item_tag ]
	if { $index == -1 } {
		return
	}
	set name [ lindex $tools_item_name_list $index ]
	set full_tag [ lindex $tools_item_full_tag_list $index ]
	set tools_item_tag_list [ lreplace $tools_item_tag_list $index $index ]
	set tools_item_name_list [ lreplace $tools_item_name_list $index $index ]
	set tools_item_full_tag_list [ lreplace $tools_item_full_tag_list $index $index ]
	if { "$status" == "PASSED" } {
		incr tools_item_passed
	} elseif { "$status" == "SKIPPED" } {
		incr tools_item_skipped
	} else {
		incr tools_item_failed
	}
	lappend tools_item_status_list "TEST SUITE $tools_suite_name ITEM ($full_tag) $name $status"
}

proc fail_test { info } {
##
## fail_test
## ---------
## abort the current test and indicate a failure to test_case
##
## Usage:
##	fail_test info
## Arguments:
##	info - a brief comment as to why the test failed, typically what
##		was received by expect that wasn't expected (e.g. "eof")
## Returns:
##	error exception
## Additional Information:
##	This procedure should be called anytime a test case detects a failure
##	The error return will cause the stack to unwrap all the way up to
##	test_case (which should be the only error trap in the stack)
##	Note that an alternative to using this routine is to simply have an
##	error return (return -code error or a failed tcl command) within the
##	test case code block.
##	However, this routine has the advantage of logging "$info" to the test.log

	log_message "\nERROR: $info"
	error "$info" "$info"
}

proc fail_suite { info { abort_now 1 } } {
##
## fail_suite
## ---------
## abort the current test suite and indicate a failure
##
## Usage:
##	fail_suite info [abort_now]
## Arguments:
##	info - a brief comment as to why the test suite failed
##	abort_now - 0/1 should the suite abort immediately or
##		simply ignore all test_cases calls until end_suite
##		default is 1
## Returns:
##	abort_now = 0 -> nothing
##	abort_now = 1 -> error exception
## Additional Information:
##	Typically used when global environment requirements are not met
##	or when subsequent test cases depend on the success of a prior test case
##
##	This routine can only be called within a test_suite code block.
##
##	If abort_now is 1, the code block aborts immediately.
##
##	However if it is 0, the test_suite code block will continue to be
##	executed, however all test_case invocations within the code block will
##	be ignored.
##
##	In general abort_now should be 1.
##
##	Only if there is special cleanup code between tests (which really
##	should be done by the cleanup code supplied to test_suite) should
##	abort_now be 0.
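##
## Sample (a minimal sketch; the environment check is hypothetical):
##	if { ! [ info exists env(CFG_HOSTS) ] } {
##		fail_suite "CFG_HOSTS not exported; no hosts to test against"
##	}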

	global tools_suite_name
	global tools_suite_result
	global tools_suite_status
	global tools_suite_failed

	show_message "\nTEST SUITE FAILURE: $info"
	show_message "TEST SUITE $tools_suite_name TESTS ABORTED"
	log_message ""
	set tools_suite_result "FAILED"
	incr tools_suite_failed
	if { $abort_now == 1 } {
		set tools_suite_status "abort"
		error "$info" "$info"
	} else {
		set tools_suite_status "skip"
		return
	}
}

proc skip_case { info } {
##
## skip_case
## ---------
## stop the current test case and indicate it was skipped
##
## Usage:
##	skip_case info
## Arguments:
##	info - a brief comment as to why the test case was skipped
## Returns:
##	does not return; throws an exception
## Additional Information:
##	Typically used when global environment requirements are not met
##	or when subsequent test cases depend on the success of a prior test case
##
##	This routine can only be called within a test_case code block.
##	If called within a test_item, the rest of the test case will be skipped
##	Of course if the item is within a parallel block, the skip will only
##	affect other items within the same subprocess. For example:
##		test_case {
##			parallel x { a b c } {
##				test_item 1$x
##				test_item 2$x
##			}
##		}
##	A skip_case within item 1a will only affect item 2a, it will not affect
##	1b, 1c, 2b, 2c. However the final results for the case will be tabulated
##	as 2 skipped, 4 passed (or failed)
##

	global tools_case_status

	# test_case (or test_item) will catch exception and report to log and stdout
	log_message ""
	set tools_case_status "skip"
	error "$info" "$info"
}

proc skip_item { info } {
##
## skip_item
## ---------
## stop the current test item and indicate it was skipped
##
## Usage:
##	skip_item info
## Arguments:
##	info - a brief comment as to why the test item was skipped
## Returns:
##	does not return; throws an exception
## Additional Information:
##	Typically used when global environment requirements are not met
##	or when subsequent test items depend on the success of a prior test item
##
##	This routine can only be called within a test_item code block.
##

	global tools_item_status

	# test_item will catch exception and report to log and stdout
	log_message ""
	set tools_item_status "skip"
	error "$info" "$info"
}

proc clear_case_counters {} {
# clear counters for cases and items completed within a test suite
	global tools_case_count
	global tools_case_passed
	global tools_case_failed
	global tools_case_skipped

	set tools_case_count 0
	set tools_case_passed 0
	set tools_case_failed 0
	set tools_case_skipped 0
	clear_item_counters
}

proc clear_item_counters {} {
# clear counters for items completed within a test case
	global tools_item_count
	global tools_item_started
	global tools_item_passed
	global tools_item_failed
	global tools_item_skipped

	set tools_item_count 0
	set tools_item_started 0
	set tools_item_passed 0
	set tools_item_failed 0
	set tools_item_skipped 0
}

# set of counters which are saved and passed across process boundary
# during parallel execution on completion of a sub-process
set saved_counters { suite_count suite_failed
			 case_count case_passed case_failed case_skipped
			 item_count item_started item_passed item_failed item_skipped}

proc tools_clear_saved_counters {} {
# clear all counters, utility function to aid counter management during
# parallel execution, by clearing counters we can identify the changes
# which occurred in a sub-process for ultimate tabulations back into
# the parent processes counters
#
# case_status and item_status are text values, not counters

	global saved_counters
	global tools_case_status
	global tools_item_status

	foreach counter $saved_counters {
		global tools_$counter
		set tools_$counter 0
	}
	set tools_case_status "okay"
	set tools_item_status "okay"
}

proc tools_save_counters { {errorcode 0} {errorinfo ""} } {
#
# output all counters to a [pid] based file
# Used for parallel test execution in sub-processes
#
# Arguments:
#	errorcode - 0/1, should an error be propagated back to parent process
#	errorinfo - info to pass back to parent in thrown error, only used if
#		errorcode is 1

	global saved_counters
	global tools_case_status
	global tools_item_status

	set filename "/tmp/testcount.[pid]"
	catch { eval exec rm -f $filename }
	set fileid [ open $filename "w" ]
	if { [ catch {
		foreach counter $saved_counters {
			global tools_$counter
			puts $fileid "$counter [set tools_$counter]"
		}
		puts $fileid "case_status $tools_case_status"
		puts $fileid "item_status $tools_item_status"
		puts $fileid "errorcode $errorcode"
		# errorinfo must be the last counter, $errorinfo could be multi-line string
		puts $fileid "errorinfo $errorinfo"
	} res ] != 0 } {
		show_message "save_counters failed: $res"
		catch { close $fileid }
		# remove file so parent recognizes a critical failure in child
		catch { eval exec rm -f $filename }
	} else {
		catch { close $fileid }
	}
}

proc tools_clear_tmp_counters {} {
#
# clear the counters into which tools_update_tmp_counters adds its totals

	global tools_tmp_exit
	global saved_counters
	global tools_tmp_case_status
	global tools_tmp_item_status
	global tools_tmp_errorcode
	global tools_tmp_errorinfo

	set tools_tmp_exit 0
	foreach counter $saved_counters {
		global tools_tmp_$counter
		set tools_tmp_$counter 0
	}
	set tools_tmp_case_status "okay"
	set tools_tmp_item_status "okay"
	set tools_tmp_errorcode 0
	set tools_tmp_errorinfo ""
}

proc tools_update_tmp_counters { pid } {
#
# fetch counters created by tools_save_counters and update tools_tmp_*
	global env
	global tools_tmp_case_status
	global tools_tmp_item_status
	global tools_tmp_errorcode
	global tools_tmp_errorinfo
	global tools_tmp_exit

	set filename "/tmp/testcount.$pid"
	if { ! [ file exists "$filename" ] } {
		set tools_tmp_exit 1
		return
	}
	set fileid [ open $filename "r" ]
	if { [ catch {
		while { [ gets $fileid line ] != -1 } {
			set counter [ lindex $line 0 ]
			set value [ lreplace $line 0 0 ]
			if { "$counter" == "case_status" } {
				if { "$value" == "skip"} {
					set tools_tmp_case_status $value
				}
			} elseif { "$counter" == "item_status" } {
				if { "$value" == "skip"} {
					set tools_tmp_item_status $value
				}
			} elseif { "$counter" == "errorcode" } {
				if { $value } {
					set tools_tmp_errorcode $value
				}
			} elseif { "$counter" == "errorinfo" } {
				if { "$value" != "" } {
					set tools_tmp_errorinfo "$value"
				}
				# rest of file could be part of a long error message
				while { [ gets $fileid line ] != -1 } {
					append tools_tmp_errorinfo "\n$line"
				}
			} else {
				global tools_tmp_$counter
				incr tools_tmp_$counter $value
			}
		}
		close $fileid
	} res ] != 0 } {
		catch { close $fileid }
		set tools_tmp_exit 1
	}
	catch { eval exec rm -f $filename }
}

proc tools_remove_tmp_counters { pid } {
#
# remove counters file created by tools_save_counters
	set filename "/tmp/testcount.$pid"
	catch { eval exec rm -f $filename }
}

proc add_item_counters { } {
# add tools_tmp_item_* to tools_item_*
	global tools_suite_result
	global tools_item_count
	global tools_item_started
	global tools_item_passed
	global tools_item_failed
	global tools_item_skipped
	global tools_tmp_item_count
	global tools_tmp_item_started
	global tools_tmp_item_passed
	global tools_tmp_item_failed
	global tools_tmp_item_skipped

	incr tools_item_count $tools_tmp_item_count
	incr tools_item_started $tools_tmp_item_started
	incr tools_item_passed $tools_tmp_item_passed
	incr tools_item_failed $tools_tmp_item_failed
	incr tools_item_skipped $tools_tmp_item_skipped
	if { $tools_tmp_item_failed > 0 } {
		set tools_suite_result "FAILED"
	}
}

proc add_case_counters { } {
# add tools_tmp_case_* to tools_case_* and add_item_counters
	global tools_suite_result
	global tools_case_count
	global tools_case_passed
	global tools_case_failed
	global tools_case_skipped
	global tools_tmp_case_count
	global tools_tmp_case_started
	global tools_tmp_case_passed
	global tools_tmp_case_failed
	global tools_tmp_case_skipped

	incr tools_case_count $tools_tmp_case_count
	incr tools_case_passed $tools_tmp_case_passed
	incr tools_case_failed $tools_tmp_case_failed
	incr tools_case_skipped $tools_tmp_case_skipped
	add_item_counters
	if { $tools_tmp_case_failed > 0 } {
		set tools_suite_result "FAILED"
	}
}

proc add_suite_counters { } {
# add tools_tmp_* to tools_*
	global tools_suite_count
	global tools_suite_failed
	global tools_tmp_suite_count
	global tools_tmp_suite_failed

	incr tools_suite_count $tools_tmp_suite_count
	incr tools_suite_failed $tools_tmp_suite_failed
	add_case_counters
}

proc tools_check_process_results {} {
# check sub process results
# Returns:
#	1 - no failures which would justify a failure of parent
#	0 - failures which will result in failure of parent

	global tools_tmp_exit
	global tools_tmp_suite_count
	global tools_tmp_suite_failed
	global tools_tmp_case_count
	global tools_tmp_case_failed
	global tools_tmp_item_count
	global tools_tmp_item_failed
	global tools_tmp_case_status
	global tools_tmp_item_status
	global tools_tmp_errorcode
	global tools_tmp_errorinfo

	# beyond just adding counters, we simulate the effect of failures
	# by looking at the _count counters to determine what was started
	# within the parallel block (to determine our context) and check
	# for failures at the appropriate level

	if { $tools_tmp_exit } {
		return 0
	}
	if { $tools_tmp_suite_count > 0 } {
		# must have started test_suites within parallel block
	} elseif { $tools_tmp_case_count > 0 } {
		# must have started test_case's within an existing test_suite
		if { $tools_tmp_suite_failed } {
			return 0
		}
	} elseif { $tools_tmp_item_count > 0 } {
		# must have started test_item's within an existing test_case
		if { $tools_tmp_suite_failed || $tools_tmp_case_failed } {
			return 0
		}
	} else {
		# parallel code within an existing item or in a general script
		if { $tools_tmp_suite_failed || $tools_tmp_case_failed
			 || $tools_tmp_item_failed } {
			return 0
		}
	}
	# if we didn't find a failure above, but there was an error from
	# the sub-process
	if { $tools_tmp_errorcode } {
		return 0
	}
	return 1
}

proc tools_propigate_process_results {} {
#
# Used for parallel test execution, takes tools_tmp_* counters and updates
# tools_* counters and performs fail_test, fail_suite, or error as needed

	global tools_suite_count
	global tools_suite_failed
	global tools_case_count
	global tools_case_failed
	global tools_item_count
	global tools_item_failed
	global tools_tmp_exit
	global tools_tmp_suite_count
	global tools_tmp_suite_failed
	global tools_tmp_case_count
	global tools_tmp_case_failed
	global tools_tmp_item_count
	global tools_tmp_item_failed
	global tools_tmp_case_status
	global tools_tmp_item_status
	global tools_tmp_errorcode
	global tools_tmp_errorinfo

	# beyond just adding counters, we simulate the effect of failures
	# by looking at the _count counters to determine what was started
	# within the parallel (to determine our context) and check
	# failures to invoke the proper failure mechanism to cause the
	# appropriate global effect in the calling process
	if { $tools_tmp_exit } {
		# one of the count files was missing, process must have exited
		# we should do the same
		exit 1
	}
	if { $tools_tmp_suite_count > 0 } {
		# must have started test_suites within parallel block
		add_suite_counters
	} elseif { $tools_tmp_case_count > 0 } {
		# must have started test_case's within an existing test_suite
		add_case_counters
		if { $tools_tmp_suite_failed } {
			fail_suite "suite failure during parallel execution"
		}
	} elseif { $tools_tmp_item_count > 0 } {
		# must have started test_item's within an existing test_case
		add_item_counters
		if { $tools_tmp_suite_failed } {
			fail_suite "suite failure during parallel execution"
		}
		if { $tools_tmp_case_failed } {
			fail_test "case failure during parallel execution"
		}
		if { $tools_tmp_case_status == "skip" } {
			# case skipped within parallel execution
			skip_case "$tools_tmp_errorinfo"
		}
	} else {
		# parallel code within an existing item or in a general script
		if { $tools_tmp_suite_failed } {
			fail_suite "suite failure during parallel execution"
		}
		if { $tools_tmp_case_failed } {
			fail_test "case failure during parallel execution"
		}
		if { $tools_tmp_item_failed } {
			fail_test "item failure during parallel execution"
		}
		if { $tools_tmp_case_status == "skip" } {
			# case skipped within parallel execution
			skip_case "$tools_tmp_errorinfo"
		}
		if { $tools_tmp_item_status == "skip" } {
			# item skipped within parallel execution
			skip_item "$tools_tmp_errorinfo"
		}
	}
	# if we didn't find a failure above, but there was an error from
	# the sub-process, invoke the general error mechanism
	if { $tools_tmp_errorcode } {
		error "$tools_tmp_errorinfo" "$tools_tmp_errorinfo"
	}
}

proc show_more_lines { linecnt { id "" } } {
##
## show_more_lines
## ---------------
## show the next few lines from the spawned program's output
##
## Usage:
##	show_more_lines line_cnt [id]
## Arguments:
##	line_cnt - number of lines to attempt to show
##	id - spawn_id of child, default is global spawn_id
## Returns:
##	nothing
## Additional Information:
##	If a timeout or eof occurs before line_cnt lines are shown, this
##	procedure returns without an error

	global expect_out

	if { "$id" == "" } {
		global spawn_id
	} else {
		# use a local, expect_after isn't involved anyway
		# since we cover eof and default cases here
		set spawn_id $id
	}

	# local timeout variable is purposely used so that
	# any test_suite global settings are not altered
	set timeout 1
	for { set i 0 } { $i < $linecnt } { incr i } {
		expect {
			"\n"	noop
			eof	return
			default	break
		}
	}
	log_message ""
	return
}

proc log_message { string } {
##
## log_message
## -----------
## output an informative message
##
## Usage:
##	log_message string
## Arguments:
##	string - string to put to log, a newline will be appended
## Returns:
##	nothing
## Additional Information:
##	This will log the given string to the log_file, if there is
##	no current log_file, it is output to stdout
##	This should be used for all detailed output routines within
##	test scripts or utility procedures.  The case where it sends output
##	to stdout allows for interactive execution of commands which use this
##	Otherwise, within a typical expect test script, the log_file will get
##	the detailed output and stdout will only get brief messages
##
##	This adds a newline at the end of string
##
##	No output nor logging is generated for identify_mode

	global log_disable

	if { [ identify_mode ] } {
		return
	}
	if { [log_file -info] == "" } {
		if { ! $log_disable } {
			puts "$string"
		}
	} else {
		send_log "$string\n"
	}
	return
}

proc show_message { string } {
##
## show_message
## -----------
## output an informative message to stdout and the log
##
## Usage:
##	show_message string
## Arguments:
##	string - string to put to log and stdout, a newline will be appended
## Returns:
##	nothing
## Additional Information:
##	This will log the given string to the log_file, if there is one,
##	and stdout
##
##	This adds a newline at the end of string
##
##	No output nor logging is generated for identify_mode

	if { [ identify_mode ] } {
		return
	}
	if { [log_file -info] != "" } {
		send_log "$string\n"
	}
	puts "$string"
	return
}

proc show_performance { string } {
##
## show_performance
## -----------
## output performance results to stdout and the log
##
## Usage:
##	show_performance string
## Arguments:
##	string - string to put to log and stdout, a newline will be appended
##			and each newline will be preceded with "PERF "
## Returns:
##	nothing
## Additional Information:
##	This will log the given string to the log_file, if there is one,
##	and stdout
##
##	This adds a newline at the end of string
##
##	No output nor logging is generated for identify_mode
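##
## Example:
##	show_performance "bandwidth: 100 MB/s\nlatency: 5 us"
## writes to stdout (and the log, if open):
##	PERF bandwidth: 100 MB/s
##	PERF latency: 5 us
##	PERF ---------------------------------------------------------------------------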

	if { [ identify_mode ] } {
		return
	}
	regsub -all "\n" $string "\nPERF " message
	if { [log_file -info] != "" } {
		send_log "PERF $message\n"
		send_log "PERF ---------------------------------------------------------------------------\n"
	}
	puts "PERF $message"
	puts "PERF ---------------------------------------------------------------------------\n"
	return
}

proc noop {} {
##
## noop
## ----
## do nothing procedure
##
## Usage:
##	noop
## Returns:
##	nothing
## Additional Information:
##	useful as the body for expect commands when the pattern needs no special
##	execution

	return
}

proc expect_eof { timelimit  { ignore_rest 0 } } {
##
## expect_eof
## ----------
## utility procedure to check for eof
##
## Usage:
##	expect_eof timelimit [ignore_rest]
## Arguments:
##	timelimit - maximum time to wait
##	ignore_rest - ignore any data prior to eof (default 0)
##				if 0 any data received before eof is an error
## Returns:
##	eof found - nothing
##	eof not found - error exception
## Additional Information:
##	This is designed for use within test_case's $code
##	After getting eof, it makes sure the child is terminated
##	by waiting and/or killing child as needed
##	The global timeout is saved, changed and restored by this routine
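##
## Sample (a minimal sketch):
##	exp_send "exit\n"
##	expect_eof 30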

	global spawn_id
	global expect_out
	global timeout
	global expecting
	global env

	set save_timeout $timeout

	set_timeout $timelimit

	set expecting "EOF"
	if { [ info exists env(TESTDEBUG) ] } {
		log_message "DEBUG: expecting EOF"
	}

	expect {
		eof	noop
		"?*"	{ if { ! $ignore_rest } {
					log_message "\nERROR: expected: EOF"
			  		log_message "\nERROR: Unexpected data: $expect_out(buffer)"
			  		fail_test "Unexpected data"
				  } else {
					exp_continue
				  }
			}
		timeout	{ 
				log_message "\nERROR: timeout ($timeout) waiting for: EOF"
				# get buffered data
				expect *
				if { "$expect_out(buffer)" != "" } {
					log_message "\nERROR: timeout: Received data: $expect_out(buffer)"
					fail_test "timeout: with data"
				} else {
					fail_test "timeout: No data"
				}
			}
		default	{ log_message "\nERROR: expected: EOF"
			  fail_test "default"
			}
	}
	# make sure child terminates
	stop_child $spawn_id

	set timeout $save_timeout

	return
}

proc ignore_rest {} {
##
## ignore_rest
## -----------
## utility procedure to ignore the rest of the output
## by waiting and/or killing child as needed
##
## Usage:
##	ignore_rest
## Returns:
##	nothing
## Additional Information:
##	This is designed for use within test_case's $code

	# make sure child terminates
	child_cleanup
	return
}

proc run_cmd { cmd } {
##
## run_cmd
## -------
## utility procedure to run a command with error logging
##
## Usage:
##	run_cmd cmd
## Typical Usage:
##	run_cmd {exec some_shell_command}
## Returns:
##	output from command on success
## Additional Information:
##	If the given command fails, it is logged along with the output
##	from the command and an error is generated
##	This is designed for use within test_case's $code

	global spawn_id
	global spawn_out
	global interact_out
	global expect_out
	global timeout
	global expecting
	global env

	if { [ info exists env(TESTDEBUG) ] } {
		log_message "DEBUG: run_cmd $cmd"
	}
	if { [catch { eval $cmd } string] == 1 } {
		set info "Command failed: $cmd"
		log_message "$info"
		log_message "$string"
		error "$info" "$info"
	} else {
		return "$string"
	}
}

proc compare_files { file1 file2 } {
##
## compare_files
## -------------
## utility procedure to compare 2 ascii files
##
## Usage:
##	compare_files file1 file2
## Returns:
##	match - nothing
##	mismatch - error exception
## Additional Information:
##	This is designed for use within test_case's $code
##	It causes an error exception if the files do not match

	log_message "Comparing $file1 to $file2"
	run_cmd "exec diff $file1 $file2"
	return
}

proc compare_tdiff_files { file1 file2 } {
##
## compare_tdiff_files
## ---------------------
## utility procedure to compare a file against a pattern/template using tdiff
##
## Usage:
##	compare_tdiff_files file1 file2
## Returns:
##	match - nothing
##	mismatch - error exception
## Additional Information:
##	This is designed for use within test_case's $code
##	It causes an error exception if the files do not match

	log_message "Comparing template $file1 to $file2"
	run_cmd "exec tdiff $file1 $file2"
	return
}

proc compare_binary_files { file1 file2 } {
##
## compare_binary_files
## --------------------
## utility procedure to compare 2 binary files
##
## Usage:
##	compare_files file1 file2
## Returns:
##	match - nothing
##	mismatch - error exception
## Additional Information:
##	This is designed for use within test_case's $code
##	It causes an error exception if the files do not match

	log_message "Comparing Binary $file1 to $file2"
	run_cmd "exec cmp $file1 $file2"
	return
}

proc match_files { range_list template_list file_list } {
##
## match_files
## -----------
## allow a set of templates and data_files to be matched in any combination
##
## Usage:
##	match_files range_list template_list datafile_list
## Arguments:
##	range_list - a list of ranges.  Each indicates the number of times
##		each template must be matched.
##		The number of range_list elements can't exceed
##		the number of template_list elements.  If ranges are not
##		provided for all template_list elements, the additional
##		template_list elements will all have the last range provided.
##		If the range_list is empty, the default is "1"
##		A given range list element cannot include any spaces or tabs.
##		Format (X and Y are positive integers):
##			X	expect exactly this many matches of command
##			X-Y	Allow up to Y matches, require at least X matches.
##			X- 	Similar to X-Y, but Y is infinite.
##		The special range "m" may be included at the start of the
##		list to indicate that every template_list entry which has
##		not yet reached its maximum match count should be applied
##		against the data_file.
##	template_list - a list of tdiff template files to be matched.
##	datafile_list - a list of tdiff data files to be matched against the
##		templates.
## Returns:
##	match - "matched"
##	mismatch - error exception
##	invalid args - error exception
## Additional Information:
##	If the template_files are not each matched the required minimum
##	times, this routine returns with an error exception.
##
##	For each data file, the template files are processed in the
##	order provided.  Once a given template file has matched its maximum
##	number of occurrences (Y), it is no longer executed against
##	subsequent data files.
##
##	If some of the ranges are not 1, it is valid to have the number of
##	elements in the file_list differ from the number of elements in
##	the template_list.
##
##	The special m range parallels the tdiff %orderless -m option.
##	This allows every unmaxed template to be applied against each data_file
##	This can be especially useful for protocols such as SMTP which may
##	handle multiple recipient distributions by sending 1 distribution with
##	all the recipients or multiple distributions with various subsets of
##	the recipient list.
##	In that case, a template file should exist to match a distribution
##	with the given recipient (and possibly others) and that template list
##	could be applied against the distributions received to verify that
##	all the recipients got mail.
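##
## Example (a minimal sketch; the file names are hypothetical):
##	match_files { 2 0-1 } { required.td optional.td } { out1 out2 out3 }
##	requires required.td to match exactly 2 data files, allows
##	optional.td to match at most 1, and fails if any data file
##	matches no template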
#
#	local variables:
#		scoreboard(#) - count that template_list element # has matched
#		range_min(#), range_max(#) - range values for template_list
#				element #

	log_message "Matching files { $range_list } { $template_list } { $file_list }"

	set num_data_files [llength $file_list]
	set num_template_files [llength $template_list]
	set num_ranges [llength $range_list]
	set match_many 0

	if { $num_template_files == 0 } {
		set info "match_files: empty template_list"
		error "$info" "$info"
	}

	if { $num_ranges >= 1 && [lindex $range_list 0] == "m" } {
		set match_many 1
		incr num_ranges -1
		if { $num_ranges > 0 } {
			set range_list [lrange $range_list 1 $num_ranges]
		} else {
			set range_list {}
		}
	}

	if { $num_template_files < $num_ranges } {
		set info "match_files: range_list larger than template_list"
		error "$info" "$info"
	}

	set last_range_min 1
	set last_range_max 1

	for { set i 0 } { $i < $num_template_files } { incr i } {
		if { $i >= $num_ranges } {
			set range_min($i) $last_range_min
			set range_max($i) $last_range_max
		} else {
			parse_range [lindex $range_list $i] range_min($i) range_max($i)
			set last_range_min $range_min($i)
			set last_range_max $range_max($i)
		}
		set scoreboard($i) 0
	}
	foreach data_file $file_list {
		set match 0;	# has this data_file been matched yet
		set allmin 1;	# assume all matched at minimum
		set allmax 1;	# assume all matched at maximum

		for { set i 0 } { $i < $num_template_files } { incr i } {
			set template_file [lindex $template_list $i]
			if { $range_max($i) != -1
			     && $scoreboard($i) >= $range_max($i) } {
				# debug output
				# puts "match_files: template $template_file reached max ($range_max($i))"
				continue
			}
			if { ! $match || $match_many } {
				if { [ catch { exec tdiff $template_file $data_file } ] == 0 } {
					# file match
					incr scoreboard($i)
					set match 1
					log_message "$data_file matched by $template_file"
				}
			}
			if { $allmin && $scoreboard($i) < $range_min($i) } {
				set allmin 0
				# values for error message
				set not_min $template_file
				set not_min_cnt $range_min($i)
			}
			if { $range_max($i) == -1
			     || $scoreboard($i) < $range_max($i) } {
				set allmax 0
			}
		}
		# debug print
		# puts "allmin=$allmin allmax=$allmax match=$match"

		if { ! $match } {
			# no match found for any template
			if { $allmax } {
				set info "match_files: unable to match $data_file, all templates at maximum matches"
				error "$info" "$info"
			} else {
				set info "match_files: unable to match $data_file"
				error "$info" "$info"
			}
		}
	}

	if { ! $allmin } {
		# some template_files not matched minimum number of times
		set info "match_files: unable to match $not_min $not_min_cnt times"
		error "$info" "$info"
	}
	return matched
}

proc parse_range { range min_ref max_ref } {
# parse a range 
# Arguments:
#	range - range to parse
#	min_ref - name of variable to hold min
#	max_ref - name of variable to hold max
# Returns:
#	nothing
# Additional Information:
#	Fatal error on invalid range
#
	upvar $min_ref min
	upvar $max_ref max

	if { [regexp {[0-9]+-[0-9]+} $range] == 1 } {
		scan $range "%d-%d" min max
		if { $min > $max } {
			set info "Invalid range: $range"
			error "$info" "$info"
		}
	} elseif { [regexp {[0-9]+-} $range] == 1 } {
		scan $range "%d-" min
		set max -1
	} elseif { [regexp {[0-9]+} $range] == 1 } {
		set min $range
		set max $min
	} else {
		set info "Invalid range: $range"
		error "$info" "$info"
	}
	return
}

proc build_file_list { filename_list } {
##
## build_file_list
## ---------------
## Build a list of filenames from a list of files/directories
##
## Arguments:
##	filename_list - list of file/directory names
## Returns:
##	list of non-directory files
##
## Additional Information:
##	All directory entries within filename_list are recursively searched
##	and replaced in the list with a list of all the non-directory files
##	found within them.
##
##	Note that the entire list is kept in memory.
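##
## Example (a minimal sketch; the paths are hypothetical):
##	# every non-directory file under ./results, plus run.log
##	set files [ build_file_list { ./results run.log } ]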

	set file_list {}
	foreach filename $filename_list {
		if { [file isdirectory $filename] } {
			foreach filen [glob $filename/*] {
				set file_list [concat $file_list [build_file_list $filen]]
			}
		} else {
			set file_list [concat $file_list $filename]
		}
	}
	return $file_list
}

proc clean_file_list { filename_list } {
##
## clean_file_list
## ---------------
## delete the files in the given file name list
##
## Arguments:
##	filename_list - list of file/directory names
##
## Additional Information:
##	Intended for use by tests that are long running enough to build
##	up an extreme number of send files.  Large directories cause
##	performance problems for stress tests that run for a long
##	time.
##
	set hit_list [ build_file_list $filename_list ]
	foreach file $hit_list {
		catch { eval exec rm -f $file [ glob -nocomplain $file.* ] }
	}
}

proc repeat_command { command file_list cnt_list } {
##
## repeat_command
## --------------
## execute a command repeatedly against a set of files
##
## Usage:
##	repeat_command command file_list cnt_list
## Arguments:
##	command - tcl command to execute (can include arguments if passed as
##		a list)
##	file_list - list of files to supply to each execution of command
##			- note this does not actually have to be a file list,
##			however that is the typical case
##	cnt_list - list of counts to apply to command
##		if the cnt_list has fewer elements than the file_list, the
##		last cnt_list entry is applied to the remaining file_list
##		entries.  An empty cnt_list is equivalent to { 1 } 
## Returns:
##	nothing
## Sample:
##	repeat_command { /etc/init.d/ics_srp} { restart } { 10}
##
##	This does the following 10 times:
##		/etc/init.d/ics_srp restart
##
##	repeat_command { fsck} { /dev/sdb /dev/sdc /dev/sdd } { 2 2 1}
##		fsck /dev/sdb /dev/sdc /dev/sdd
##		fsck /dev/sdb /dev/sdc
##	Note that per the count, /dev/sdd was omitted from the 2nd call
##
##	This command is very useful when counts are very large

	log_message "performing repeated operation: $command\n\ton { $file_list }\n\tfor { $cnt_list }"

	set num_files [llength $file_list]
	set num_cnt [llength $cnt_list]

	if { $num_files == 0 } {
		set info "repeat_command: empty file_list"
		error "$info" "$info"
	}

	if { $num_files < $num_cnt } {
		set info "repeat_command: cnt_list larger than file_list"
		error "$info" "$info"
	}

	set last_cnt 1
	set max_cnt 1

	# set up cnt(x) for each file_list entry
	for { set i 0 } { $i < $num_files } { incr i } {
		if { $i >= $num_cnt } {
			set cnt($i) $last_cnt
		} else {
			set cnt($i) [lindex $cnt_list $i]
			set last_cnt $cnt($i)
		}
		if { $last_cnt > $max_cnt } {
			set max_cnt $last_cnt
		}
	}

	# do the command max_cnt times
	for { set i 0 } { $i < $max_cnt } { incr i } {

		# build f_list with list of file_list elements to use
		set f_list {}
		for { set j 0 } { $j < $num_files } { incr j } {
			if { $i < $cnt($j) } {
				# [list ...] allows a file_list element to
				# be a list, in which case it is kept as
				# a list
				set f_list [concat $f_list [list [lindex $file_list $j]]]
			}
		}

		# execute command
		log_message "$command [list $f_list]\n"
		uplevel 1 $command [list $f_list]
	}
	return
}

proc scp_get_file { host usercode target_filename local_filename } {
##
## scp_get_file
## --------
## scp a file from the target system to the local system
##
## Usage:
##	scp_get_file host usercode target_filename local_filename
## Arguments:
##	host - host to connect to
##	usercode - usercode on that host
##	target_filename - file to get (relative to /nfs/site/proj/stlbuilds
##		on the remote host)
##	local_filename - destination file on local system
	global env

	if { ! [ info exists env(CFG_SCP_FLAGS) ] } {
		set env(CFG_SCP_FLAGS) ""
	}

	# randomly wait up to a minute to alleviate many hosts attempting to scp files at once
	unix_cmd 150 0 "r=\$(( \$RANDOM % 60 + 1 )); sleep \$r"

	unix_cmd 400 0 "scp $env(CFG_SCP_FLAGS) $usercode@$host:/nfs/site/proj/stlbuilds/$target_filename $local_filename"
}

proc ftp_connect { host usercode password { do_spawn 1 } } {
##
## ftp_connect
## --------
## establish an ftp connection
##
## Usage:
##	ftp_connect host usercode password [do_spawn]
## Arguments:
##	host - host to connect to
##	usercode - usercode on that host
##	password - password on that host
##	do_spawn - should we spawn an expect session (e.g. one is not already in progress)
## Additional Information:
##	ftp is invoked with the -i option to avoid y/n prompts during mget

	global spawn_id
	global expect_out
	global spawn_out
	global timeout
	global expecting
	global env
	global stty_init

	# -i option will avoid y/n prompts in mget
	if { $do_spawn } {
		test_spawn "ftp -i" ftp -i $host
	} else {
		send_unix_cmd "ftp -i $host"
	}
	set timeout 120
	set expecting "ftp login sequence"
	expect {
		"Name*:"	{ exp_send "$usercode\n"
				  exp_continue
				}
		"assword:"	{ exp_send "$password\n"
				  exp_continue
				}
		"ftp: *
" {
				fail_test "ftp login failed"
				}
		"ftp> "		noop
	}
}

proc ftp_send_file { host usercode password local_filename target_filename {type "binary" } {do_spawn 1}} {
##
## ftp_send_file
## --------
## ftp a file from the local system to the specified target system
##
## Usage:
##	ftp_send_file host usercode password local_filename target_filename [type] [do_spawn]
## Arguments:
##	host - host to connect to
##	usercode - usercode on that host
##	password - password on that host
##	local_filename - file to send
##	target_filename - destination on host for file
##	type - type of file (ascii or binary).  Default is binary
##	do_spawn - should we spawn an expect session (e.g. one is not already in progress)

	global spawn_id
	global expect_out
	global spawn_out
	global timeout
	global expecting

	set save_timeout $timeout
	ftp_connect $host $usercode $password $do_spawn

	exp_send "$type\n"
	expect_list 60 { "ftp> " } { "Not connected" }

	# allow for large files
	exp_send "exp_send $local_filename $target_filename\n"
	expect_list 600 { "bytes sent" } { "Error" "Not connected" "No such" }
	expect_list 60 { "ftp> " } { "Not connected" "No such" }
	exp_send "quit\n"
	expect_any 60 { "221" "Goodbye" "So long" } { "ftp> " }
	if { $do_spawn } {
		wait_eof 60
	}
	set timeout $save_timeout
}

proc ftp_get_file { host usercode password target_filename local_filename {type "binary" } {do_spawn 1} } {
##
## ftp_get_file
## --------
## ftp a file from the target system to the local system
##
## Usage:
##	ftp_get_file host usercode password target_filename local_filename [type] [do_spawn]
## Arguments:
##	host - host to connect to
##	usercode - usercode on that host
##	password - password on that host
##	target_filename - file to get
##	local_filename - destination file on local system
##	type - type of file (ascii or binary).  Default is binary
##	do_spawn - should we spawn an expect session (e.g. one is not already in progress)

	global spawn_id
	global expect_out
	global spawn_out
	global timeout

	set save_timeout $timeout
	ftp_connect $host $usercode $password $do_spawn

	exp_send "$type\n"
	expect_list 60 { "ftp> " } { "Not connected" }

	exp_send "get $target_filename $local_filename\n"
	# allow for large files
	expect_list 600 { "bytes received" } { "Error" "Not connected" "No such" }
	expect_list 60 { "ftp> " } { "Not connected" "No such" }
	exp_send "quit\n"
	expect_any 60 { "221" "Goodbye" "So long" } { "ftp> " }
	if { $do_spawn } {
		wait_eof 60
	}
	set timeout $save_timeout
}

proc ftp_mget_files { host usercode password target_directory target_pattern local_directory {type "binary" } {do_spawn 1}} {
##
## ftp_mget_files
## --------
## ftp a set of files from the target system to the local system
##
## Usage:
##	ftp_mget_files host usercode password target_directory target_filename
##		local_directory [type] [do_spawn]
## Arguments:
##	host - host to connect to
##	usercode - usercode on that host
##	password - password on that host
##	target_directory - directory to get files from
##	target_pattern - files to get (can be wildcarded for target system)
##	local_directory - destination directory on local system
##	type - type of files (ascii or binary).  Default is binary
##	do_spawn - should we spawn an expect session (e.g. one is not already in progress)

	global spawn_id
	global expect_out
	global spawn_out
	global timeout

	set save_timeout $timeout
	ftp_connect $host $usercode $password $do_spawn

	exp_send "$type\n"
	expect_list 60 { "ftp> " } { "Not connected" }

	exp_send "cd $target_directory\n"
	expect_list 60 { "ftp> " } { "Not connected" }
	exp_send "lcd $local_directory\n"
	expect_list 60 { "ftp> " } { "Not connected" }
	exp_send "mget $target_pattern\n"
	# allow for large files
	expect_list 600 { "bytes received" } { "Error" "Not connected" "No such" }
	expect_list 60 { "ftp> " } { "Not connected" "No such" }
	exp_send "quit\n"
	expect_any 60 { "221" "Goodbye" "So long" } { "ftp> " }
	if { $do_spawn } {
		wait_eof 60
	}
	set timeout $save_timeout
}

proc date_stamp {} {
##
## date_stamp
## ---------
##
## Usage:
## 	date_stamp
## Returns:
##	date in an identical format to the date command
## Additional Information:
##	should be used in preference to [exec date].  The performance
##      difference is 30 vs 175000 microseconds.
##
	return [timestamp -format "%a %b %d %X %Z %Y"]
}

proc tools_mult_timeout { mult } {
##
## tools_mult_timeout
## ------------
## adjust timeout multiplier by a factor of mult
##
## Usage:
##	tools_mult_timeout mult
## Arguments:
##	mult - multiply timeout multiplier by mult
## Returns:
##	None
## Additional Information:
##	updates the environment variable TEST_TIMEOUT_MULT

	global env
	set env(TEST_TIMEOUT_MULT) [ expr $mult * [ test_get_env TEST_TIMEOUT_MULT] ]
}

proc calc_timeout { timelimit } {
##
## calc_timeout
## ------------
## calculate the timeout value to use by adjusting by TEST_TIMEOUT_MULT
##
## Usage:
##	calc_timeout timelimit
## Arguments:
##	timelimit - a timeout value in seconds
## Returns:
##	timelimit adjusted by TEST_TIMEOUT_MULT
## Additional Information:
##	The environment variable TEST_TIMEOUT_MULT is used as an optional
##	multiplier for all timeout values.  This can be exported by the user
##	to adjust all the timeouts in a test to account for slow systems or
##	the use of debug tools such as printf or purify which significantly
##	affect the performance of the system.
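##
## Example:
##	with TEST_TIMEOUT_MULT exported as 3, [ calc_timeout 20 ] returns 60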

	return [ expr $timelimit * [ test_get_env TEST_TIMEOUT_MULT] ]
}

proc set_timeout { timelimit } {
##
## set_timeout
## -----------
## set the expect timeout variable accounting for TEST_TIMEOUT_MULT
##
## Usage:
##	set_timeout timelimit
## Arguments:
##	timelimit - a timeout value in seconds
## Returns:
##	The timeout value set
## Additional Information:
##	The timeout variable in the callers stack frame is set.  It is up
##	to the caller to determine if this is the global or local version
##	of timeout.

	# sets the expect timeout variable in the stack frame of the caller
	return [ uplevel set timeout [ calc_timeout $timelimit ] ]
}

proc expect_list { timelimit list { error_list "" } { out_var "" } } {
##
## expect_list
## -----------
## apply expect sequentially against a list of messages
##
## Usage:
##	expect_list timelimit list [error_list [out_var]]
## Arguments:
##	timelimit - maximum wait for each message
##	list - list of messages to expect, they are regular expressions
##	error_list - optional list of messages, fails if any gotten,
##		regular expressions
##	out_var - variable in caller's context to receive all text which was
##		received during this expect
## Returns:
##	string in output which matched last regular expression in list
##	(this is also held in $expect_out(0,string) )
## Additional Information:
##	the expecting global is set as each item is expect'ed for such that
##	any errors are appropriately reported
##	an error is automatically reported if any of the error_list messages
##	are gotten at any point.  Avoid having an error_list message be
##	a subset or part of a valid list message
##	The global timeout is saved, changed and restored by this routine
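##
## Sample (a minimal sketch; the patterns are hypothetical):
##	exp_send "uptime\n"
##	expect_list 60 { "load average" } { "command not found" } out
##	log_message "uptime reported: $out"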

    global spawn_id
    global expect_out
    global spawn_out
    global timeout
    global expecting
    global env

    set save_timeout $timeout

    set_timeout $timelimit

    if { "$out_var" != "" } {
	upvar $out_var out
	set out ""
    }
    # match against a 10 screen buffer
    match_max 19200 
    # expect is real picky about the quoting and braces here
    set fail_cmd {
	log_message "\nERROR: while waiting for: $expecting"
	log_message "\nERROR: Received data: $expect_out(buffer)"
	fail_test "Invalid data"
    }
    foreach item $list {
	set arg_list {}
	foreach err_item $error_list {
	    append arg_list " -re {$err_item} {
				log_message {\nERROR: invalid data: $err_item}
				$fail_cmd
				}"
	}
	append arg_list " -re {$item} noop"
	set expecting "$item"
	if { [ info exists env(TESTDEBUG) ] } {
	    log_message "DEBUG: spawn_id: $spawn_id expecting: $expecting"
	}
	eval expect "{
			$arg_list
		}"
	if { "$out_var" != "" } {
	    append out $expect_out(buffer)
	}
    }

    set timeout $save_timeout
    return $expect_out(0,string)
}

proc expect_any { timelimit list { error_list "" } { out_var ""} } {
##
## expect_any
## -----------
## apply expect once against a list of messages, succeed if matches any one
## message in list
##
## Usage:
##	expect_any timelimit list [error_list [out_var]]
## Arguments:
##	timelimit - maximum wait for each message
##	list - list of messages to expect, they are regular expressions
##	error_list - optional list of messages, fails if any gotten,
##		regular expressions
##	out_var - variable in caller's context to receive all text which was
##		received during this expect
## Returns:
##	string in output which matched a regular expression in list
##	(this is also held in $expect_out(0,string) )
## Additional Information:
##	accept 1st of list (e.g. expects any 1 of list)
##	the expecting global is set such that
##	any errors are appropriately reported
##	an error is reported if any of the error_list messages
##	are gotten.  Avoid having an error_list message be
##	a subset or part of a valid list message
##	The global timeout is saved, changed and restored by this routine
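##
## Sample (a minimal sketch; the patterns are hypothetical):
##	exp_send "cat /etc/fstab\n"
##	expect_any 30 { "ext4" "xfs" "nfs" } { "No such file" }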

	global spawn_id
	global expect_out
	global spawn_out
	global timeout
	global expecting
	global env

	set save_timeout $timeout

	set_timeout $timelimit

	if { "$out_var" != "" } {
		upvar $out_var out
		set out ""
	}
	# expect is real picky about the quoting and braces here
	set fail_cmd {
		log_message "\nERROR: while waiting for: $expecting"
		log_message "\nERROR: Received data: $expect_out(buffer)"
		fail_test "Invalid data"
	}
	set expecting ""
	set arg_list {}
	foreach err_item $error_list {
		append arg_list " -re {$err_item} {
			log_message {\nERROR: invalid data: $err_item}
			$fail_cmd
			}"
	}
	foreach item $list {
		append arg_list " -re {$item} noop"
		if { "$expecting" == "" } {
			set expecting "$item"
		} else {
			set expecting "$expecting OR $item"
		}
	}
	if { [ info exists env(TESTDEBUG) ] } {
		log_message "DEBUG: spawn_id: $spawn_id expecting: $expecting"
	}
	eval expect "{
		 $arg_list
	}"
	if { "$out_var" != "" } {
		append out $expect_out(buffer)
	}

	set timeout $save_timeout
	return $expect_out(0,string)
}

proc expect_progress { timelimit progress_list done_list { error_list "" } { out_var "" } } {
##
## expect_progress
## -----------
## apply expect against a long running operation which reflects progress
##
## Usage:
##	expect_progress timelimit progress_list done_list [error_list [out_var]]
## Arguments:
##	timelimit - maximum wait for each message
##	progress_list - list of progress messages to expect,
##		they are regular expressions
##	done_list - list of completion messages to expect,
##		they are regular expressions
##	error_list - optional list of messages, fails if any gotten,
##		regular expressions
##	out_var - variable in caller's context to receive all text which was
##		received during this expect
## Returns:
##	string in output which matched a regular expression in done_list
##	(this is also held in $expect_out(0,string) )
## Additional Information:
##	expects to see at least one progress_list or done_list message
##	within each timelimit interval.
##	progress_list messages reset timelimit and expect is run again
##	progress_list messages can be repeated or seen in any order.
##	There is no requirement for any nor all of them to occur.
##	done_list messages indicate completion and all must appear in the order
##	given.
##	the expecting global is set as each done item is expect'ed for such that
##	any errors are appropriately reported
##	an error is automatically reported if any of the error_list messages
##	are gotten at any point.  Avoid having an error_list message be
##	a subset or part of a valid list message
##	The global timeout is saved, changed and restored by this routine
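##
## Sample (a minimal sketch; the patterns are hypothetical):
##	exp_send "make all\n"
##	expect_progress 300 { "gcc" "ld" } { "Build complete" } { "Error" }
##	each "gcc" or "ld" line restarts the 300 second clock; the
##	"Build complete" message must still arrive for success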

    global spawn_id
    global expect_out
    global spawn_out
    global timeout
    global expecting
    global env

    set save_timeout $timeout

    set_timeout $timelimit

    if { "$out_var" != "" } {
	upvar $out_var out
	set out ""
    }
    # expect is real picky about the quoting and braces here
    set fail_cmd {
	log_message "\nERROR: while waiting for: $expecting"
	log_message "\nERROR: Received data: $expect_out(buffer)"
	fail_test "Invalid data"
    }
    foreach item $done_list {
	set arg_list {}
	foreach err_item $error_list {
	    append arg_list " -re {$err_item} {
				log_message {\nERROR: invalid data: $err_item}
				$fail_cmd
				}"
	}
	append arg_list " -re {$item} noop"
	set expecting "$item"
	foreach prog_item $progress_list {
	    append arg_list " -re {$prog_item}"
	    append arg_list { {
		if { "$out_var" != "" } {
		    append out $expect_out(buffer)
		}
		exp_continue } }
	    append expecting " OR $prog_item"
	}
	if { [ info exists env(TESTDEBUG) ] } {
	    log_message "DEBUG: spawn_id: $spawn_id expecting: $expecting"
	}
	eval expect "{
			 $arg_list
		}"
	if { "$out_var" != "" } {
	    append out $expect_out(buffer)
	}
    }

    set timeout $save_timeout
    return $expect_out(0,string)
}

proc _got_orderless_item { got once listname error_listname indexname } {
#
# _got_orderless_item
# -------------------
# Process a matched item
#
# Usage:
# _got_orderless_item got once listname error_listname indexname
#
# Arguments:
#	got - pattern matched within the list
#	once - should got be added to error_list (1=yes, 0=no)
#	listname - name of list variable which contains $got
#	error_listname - name of error_list variable to update
#	indexname - name of variable to hold index of got within list
# Additional Information:
#	list is updated to remove got from it
#	if once, got is appended to error_list
#	fatal error if got not found in list
#	list is a list of string match, expect style patterns

	upvar $listname list
	upvar $error_listname error_list
	upvar $indexname index

	set index [ lsearch -exact $list $got ]
	if { $index == -1 } {
		fail_test "Bug in expect_list_orderless: got={$got}, list={$list}"
	}
	set list [ lreplace $list $index $index ]
	if { $once } {
		lappend error_list "$got"
	}
}

proc expect_list_orderless { timelimit once list { error_list "" } { out_var ""} } {
##
## expect_list_orderless
## -----------
## apply expect against a list of messages, order independently
##
## Usage:
##	expect_list_orderless timelimit once list [error_list [out_var]]
## Arguments:
##	timelimit - maximum time to wait for next message
##	once - if 1, it is an error for any of the messages in list to
##		occur more than once, if 0 they can occur any number of
##		times
##	list - list of messages to expect, regular expressions
##	error_list - optional list of messages, fails if any gotten,
##		regular expressions
##	out_var - variable in caller's context to receive all text which was
##		received during this expect
## Returns:
##	string in output which last matched a regular expression in list
##	(this is also held in $expect_out(0,string) )
## Additional Information:
##	the expecting global is set as each item set is expect'ed for, so that
##	any errors are appropriately reported
##	an error is automatically reported if any of the error_list messages
##	are gotten at any point.  Avoid having an error_list message be
##	a subset or part of a valid list message
##	list messages should not be subsets of each other
##	The messages in list are permitted to occur in any order.
##	If you want to permit a given message to occur more than once,
##	set once=0
##	The global timeout is saved, changed and restored by this routine

	global spawn_id
	global expect_out
	global spawn_out
	global timeout
	global expecting
	global env

	set save_timeout $timeout

	set_timeout $timelimit

	if { "$out_var" != "" } {
		upvar $out_var out
		set out ""
	}
	# expect is real picky about the quoting and braces here
	set fail_cmd {
		log_message "\nERROR: while waiting for: $expecting"
		log_message "\nERROR: Received data: $expect_out(buffer)"
		fail_test "Invalid data"
	}
	while { [ llength $list ] != 0 } {
		# build arg_list with the list of valid items
		# expecting indicates all the items remaining
		set expecting ""
		set arg_list {}
		foreach err_item $error_list {
			append arg_list " -re {$err_item} {
				log_message {\nERROR: invalid data: $err_item}
				$fail_cmd
				}"
		}
		foreach item $list {
			append arg_list " -re {$item} { set got {$item} }"
			if { "$expecting" == "" } {
				set expecting "$item"
			} else {
				set expecting "$expecting OR $item"
			}
		}
		set got ""
		if { [ info exists env(TESTDEBUG) ] } {
			log_message "DEBUG: expecting $expecting"
		}
		eval expect "{
		 	$arg_list
		}"
		if { "$out_var" != "" } {
			append out $expect_out(buffer)
		}
		# if we did not fail_test by getting an invalid item,
		# got will indicate what we did get
		# Remove that item from the list of valid messages and
		# if once, add it to the list of invalid messages
		if { "$got" != "" } {
			_got_orderless_item $got $once list error_list index
			# expect applies the patterns to a single output buffer
			# in the order given; with a large output, or with
			# multiple patterns matched by a single line, expect
			# may match fewer patterns than are actually present.
			# now see if other patterns in the remainder of
			# the list are also matched
			foreach item [ lrange $list $index end ] {
				if { [ regexp ".*$item.*" $expect_out(buffer) ] == 1 } {
					_got_orderless_item $item $once list error_list index
				}
			}
		} else {
			fail_test "Bug in expect_list_orderless, got={}"
		}
	}

	set timeout $save_timeout
	return $expect_out(0,string)
}
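
# A minimal usage sketch (patterns, timelimit and out variable are
# hypothetical):
#	expect_list_orderless 60 1 { {node1 up} {node2 up} } { {[Ee]rror} } output
# waits up to 60 seconds per message for "node1 up" and "node2 up" in
# either order, fails if either repeats or an error message appears,
# and collects all received text in $output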

proc expect_progress_orderless { timelimit progress_list once done_list { error_list "" } { out_var ""} } {
##
## expect_progress_orderless
## -----------
## apply expect against a long running operation which reflects progress
## and expect a list of messages, order independently
##
## Usage:
##	expect_progress_orderless timelimit progress_list once done_list [error_list [out_var]]
## Arguments:
##	timelimit - maximum time to wait for next message
##	progress_list - list of progress messages to expect,
##		they are regular expressions
##	once - if 1, it is an error for any of the messages in list to
##		occur more than once, if 0 they can occur any number of
##		times
##	done_list - list of completion messages to expect, regular expressions
##	error_list - optional list of messages, fails if any gotten,
##		regular expressions
##	out_var - variable in caller's context to receive all text which was
##		received during this expect
## Returns:
##	string in output which last matched a regular expression in done_list
##	(this is also held in $expect_out(0,string) )
## Additional Information:
##	expects to see at least one progress_list or done_list message
##	within each timelimit interval.
##	progress_list messages reset timelimit and expect is run again
##	progress_list messages can be repeated or seen in any order.
##	There is no requirement for any nor all of them to occur.
##	the expecting global is set as each item set is expect'ed for, so that
##	any errors are appropriately reported
##	an error is automatically reported if any of the error_list messages
##	are gotten at any point.  Avoid having an error_list message be
##	a subset or part of a valid list message
##	done_list messages should not be subsets of each other
##	The messages in done_list are permitted to occur in any order.
##	If you want to permit a given message to occur more than once,
##	set once=0
##	The global timeout is saved, changed and restored by this routine

	global spawn_id
	global expect_out
	global spawn_out
	global timeout
	global expecting
	global env

	set save_timeout $timeout

	set_timeout $timelimit

	if { "$out_var" != "" } {
		upvar $out_var out
		set out ""
	}
	# expect is real picky about the quoting and braces here
	set fail_cmd {
		log_message "\nERROR: while waiting for: $expecting"
		log_message "\nERROR: Received data: $expect_out(buffer)"
		fail_test "Invalid data"
	}
	while { [ llength $done_list ] != 0 } {
		# build arg_list with the list of valid items
		# expecting indicates all the items remaining
		set expecting ""
		set arg_list {}
		foreach err_item $error_list {
			append arg_list " -re {$err_item} {
				log_message {\nERROR: invalid data: $err_item}
				$fail_cmd
				}"
		}
		foreach item $done_list {
			append arg_list " -re {$item} { set got {$item} }"
			if { "$expecting" == "" } {
				set expecting "$item"
			} else {
				set expecting "$expecting OR $item"
			}
		}
		foreach prog_item $progress_list {
			append arg_list " -re {$prog_item}"
			append arg_list { {
				if { "$out_var" != "" } {
					append out $expect_out(buffer)
				}
				exp_continue } }
			append expecting " OR $prog_item"
		}
		set got ""
		if { [ info exists env(TESTDEBUG) ] } {
			log_message "DEBUG: spawn_id: $spawn_id expecting: $expecting"
		}
		eval expect "{
		 	$arg_list
		}"
		if { "$out_var" != "" } {
			append out $expect_out(buffer)
		}
		# if we did not fail_test by getting an invalid item,
		# got will indicate what we did get
		# Remove that item from the list of valid messages and
		# if once, add it to the list of invalid messages
		if { "$got" != "" } {
			_got_orderless_item $got $once done_list error_list index
			# expect applies the patterns to a single output buffer
			# in the order given; with a large output, or with
			# multiple patterns matched by a single line, expect
			# may match fewer patterns than are actually present.
			# now see if other patterns in the remainder of
			# the list are also matched
			foreach item [ lrange $done_list $index end ] {
				if { [ regexp ".*$item.*" $expect_out(buffer) ] == 1 } {
					_got_orderless_item $item $once done_list error_list index
				}
			}
		} else {
			fail_test "Bug in expect_list_orderless, got={}"
		}
	}

	set timeout $save_timeout
	return $expect_out(0,string)
}
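
# A minimal usage sketch (patterns, timelimit and out variable are
# hypothetical):
#	expect_progress_orderless 120 { {working} } 1 \
#		{ {node1 done} {node2 done} } { {[Ee]rror} } output
# restarts the 120 second clock on each "working" progress message and
# accepts the two done messages in either order, each at most once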

proc test_execute {list code} {
##
## test_execute
## ------------
## Conditionally execute the block of code specified
##
## Usage:
##	test_execute { list } {
##		conditional code
##	}
## Arguments:
##	list - a list of conditions under which the code should not be run.  The
##		items specified in the list are searched for in the
##		TEST_NOEXECUTE environment variable and if found, the code is
##		not executed.
##	code - Tcl commands for execution (typically a list)
## Returns:
##	None
## Additional Information:
##	This routine will not execute any code if any of the items in the list
##	are located in the TEST_NOEXECUTE environment variable list.
##	Further, the code is not executed if the TEST_IDENTIFY environment
##	variable is defined.
##
    global env

    if { ! [ identify_mode ] } {
	set skip 0
	if { [ info exists env(TEST_NOEXECUTE) ] } {
	    foreach item $list {
		if { [lsearch -exact "$env(TEST_NOEXECUTE)" $item ] != -1 } {
		    set skip 1;
		    break;
		}
	    }
	}
	if { $skip == 0 } {
	    uplevel $code
	}
    }
}
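
# A usage sketch (the condition name and test body are hypothetical):
#	test_execute { ipoib } {
#		run_ipoib_test
#	}
# if TEST_NOEXECUTE contains "ipoib", or TEST_IDENTIFY is set, the
# body is skipped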

proc show_config {} {
##
## show_config
## -------------
## determine if tests are being run with show config enabled
##
## Usage:
##	show_config
## Returns:
##	0 - no
##	1 - yes

	return [ test_check_yn [ test_get_env TEST_SHOW_CONFIG ] ]
}

proc show_start {} {
##
## show_start
## -------------
## determine if tests are being run with show start enabled
##
## Usage:
##	show_start
## Returns:
##	0 - no
##	1 - yes

	return [ test_check_yn [ test_get_env TEST_SHOW_START ] ]
}

proc identify_mode {} {
##
## identify_mode
## -------------
## determine if tests are being run in identify mode
##
## Usage:
##	identify_mode
## Returns:
##	0 - no
##	1 - yes

	return [ test_check_yn [ test_get_env TEST_IDENTIFY ] ]
}
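
# Usage sketch for the three queries above (assuming test_check_yn
# treats "y" style values as true):
#	set env(TEST_SHOW_CONFIG) "y"
#	if { [ show_config ] } {
#		show_message "configuration display is enabled"
#	}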

proc get_config_path { filename } {
##
## get_config_path
## -----------
## Return the path to the configuration file specified
##
## Arguments:
##	filename - name of a configuration file
## Additional Information:
##	if filename does not name an existing file (or names a directory),
##	it is searched for under $TL_DIR/configs and then
##	$TL_DIR/HostTestCases/configs; an error is raised if it cannot
##	be found

	global env

	if { ( ! [ file exists $filename ] || [ file isdirectory $filename ] )
		 && [ info exists env(TL_DIR) ] } {
		set dir1 "$env(TL_DIR)/configs"
		set dir2 "$env(TL_DIR)/HostTestCases/configs"
		if { [ file isdirectory "$dir1" ]
			&& [ file exists "$dir1/$filename" ] } {
			return "$dir1/$filename"
		} elseif { [ file isdirectory "$dir2" ]
			&& [ file exists "$dir2/$filename" ] } {
			return "$dir2/$filename"
		} else {
			set res "get_config_path: unable to find $filename in ., $dir1 nor $dir2"
			log_message "$res"
			error "$res"
		}
	} elseif { [ file exists $filename ] } {
		return "$filename"
	} else {
		# filename not found and TL_DIR is not set to enable a search
		set res "get_config_path: unable to find $filename"
		log_message "$res"
		error "$res"
	}
}
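
# Usage sketch (the file name is hypothetical); the literal path is
# used if it exists, otherwise the TL_DIR based directories are searched:
#	set path [ get_config_path mytest.cfg ]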

proc read_config { filename } {
##
## read_config
## -----------
## Read the configuration file specified
##
## Arguments:
##	filename - name of a configuration file
## Additional Information:
##	The configuration file consists of lines of the form:
##		parameter_name	value
##				OR
##		include filename
##	If the "parameter_name" is # or the line is blank,
##	the given line is ignored
##	Otherwise the parameter is set to the supplied value unless
##	it is already exported.  This allows the configuration to
##	be partially overridden by the environment.  It also permits the
##	setting of any environment variables (such as TEST_FAIL_SAVE)
##	An omitted "value" field results in the parameter being set to the
##	null string
##	If parameter_name is already exported, it overrides the value in the
##	config file.  Due to use of eval, value for a parameter can include
##	references to other parameters and environment variables as $env(name)

	global env

	if { [ show_config ] } {
		show_message "Test Configuration File: $filename"
	}
	set filename [get_config_path $filename ]
	set fileid [ open $filename "r" ]
	if { [ catch {
		while { [ gets $fileid line ] != -1 } {
			set parameter [ lindex $line 0 ]
			if { "$parameter" == ""
			     || "[ string index $parameter 0 ]" == "#" } {
				# comment or blank line
				continue
			}
			eval set value \[ lreplace \"$line\" 0 0 \]
			#set value [ lreplace $line 0 0 ]
			if { "$parameter" == "include" } {
				read_config "$value"
			} else {
				if { ! [ info exists env($parameter) ] } {
					set env($parameter) "$value"
					#eval set env($parameter) \"$value\"
				}
				# show the parameter values
				set message [ format "%-25s %s" $parameter $env($parameter) ]
				if { [ show_config ] } {
					show_message "$message"
				}
			}
		}
		if { [ show_config ] } {
			show_message ""
		}
		close $fileid
	} res ] != 0 } {
		catch { close $fileid }
		if { [ show_config ] } {
			show_message ""
		}
		log_message "read_config: $res in variable \"$parameter\""
		error "read_config: $res in variable \"$parameter\""
	}

	# fixup suffixes, this is a hack, should be put elsewhere
	if { [ info exists env(CFG_IPOIB_SUFFIX) ]
		 && [ string equal "$env(CFG_IPOIB_SUFFIX)" "NONE" ] } {
		set env(CFG_IPOIB_SUFFIX) ""
	}
	if { [ info exists env(CFG_IPOIB_PREFIX) ]
		 && [ string equal "$env(CFG_IPOIB_PREFIX)" "NONE" ] } {
		set env(CFG_IPOIB_PREFIX) ""
	}
	if { [ info exists env(CFG_INIC_SUFFIX) ]
		 && [ string equal "$env(CFG_INIC_SUFFIX)" "NONE" ] } {
		set env(CFG_INIC_SUFFIX) ""
	}
}
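
# A sample (hypothetical) configuration file accepted by read_config:
#	# lines starting with # and blank lines are ignored
#	CFG_IPOIB_SUFFIX	NONE
#	TEST_FAIL_SAVE		y
#	include			common.cfg
# parameters already exported in the environment take precedence over
# the values given in the file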

proc get_config { var } {
##
## get_config
## -------------
## support function for use by front.sh to get config file values
## out to a calling bash shell

	#global env

	#puts "$env($var)"
	puts [test_get_env "$var"]
}

proc sum_list { list } {
##
## sum_list
## -------------
## compute the sum of a list of numbers
##
## Usage:
##	sum_list list
## Arguments:
##	list - a list of numbers
## Returns:
##	sum
## Additional Information:
##	non-numeric entries in the list are quietly ignored

	set sum 0
	foreach entry $list {
		if { [ string is double -strict $entry ] } {
			# use expr rather than incr so non-integer entries sum correctly
			set sum [ expr { $sum + $entry } ]
		}
	}
	return $sum
}
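
# e.g. [ sum_list {1 2.5 abc 3} ] returns 6.5; "abc" is quietly ignored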

proc tools_get_platform {} {
    global tcl_platform
    return $tcl_platform(machine)
}

# IPtoHex assumes IP has already been validated
proc IPtoHex { IP } {
    binary scan [binary format c4 [split $IP .]] H8 Hex
    return $Hex
}
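
# e.g. [ IPtoHex 192.168.1.10 ] returns "c0a8010a"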

proc hexToIP { Hex } {
    binary scan [binary format H8 $Hex] c4 IPtmp
    foreach num $IPtmp {
    # binary scan "c" format gives signed int - the following
    # [expr]-ology converts to unsigned (from [binary] manpage)
        lappend IP [expr ($num + 0x100) % 0x100]
    }
    set IP [join $IP .]
    return $IP
}
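
# e.g. [ hexToIP c0a8010a ] returns "192.168.1.10"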

# IP and netmask in Hex, returns hex
proc broadcastAddress { hexIP hexNetmask } {
    set tmpBrdAddr [expr 0x$hexIP | ( 0x$hexNetmask ^ 0xffffffff )]
    binary scan [binary format I $tmpBrdAddr] H8 broadcastAddress
    return $broadcastAddress
}

# IP and netmask in Hex, returns hex
proc networkAddress { hexIP hexNetmask } {
    set compNetmask [expr 0x$hexNetmask ^ 0xffffffff]
    set tmpNetAddr [expr ( 0x$hexIP | $compNetmask ) ^ $compNetmask]
    binary scan [binary format I $tmpNetAddr] H8 networkAddress
    return $networkAddress
}
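
# Worked example for the two helpers above, using 192.168.1.10 with
# netmask 255.255.255.0 (c0a8010a / ffffff00 in hex):
#	broadcastAddress c0a8010a ffffff00	;# -> c0a801ff (192.168.1.255)
#	networkAddress c0a8010a ffffff00	;# -> c0a80100 (192.168.1.0)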

proc IPisValid { IP } {
    # must contain only dots and digits
    # this originally read:-
    #if { [regsub -all {[.0-9]} $IP {}] != "" } {
    #  return 0
    #}
    regsub -all {[.0-9]} $IP {} tmpStr
    if { $tmpStr != "" } {
        return 0
    }
    # however this appears to be an 8.4.1-ism which doesn't work with
    # earlier versions (e.g. the 8.4a2 version that the PocketPC tcltk
    # version is based on).
    #
    # exactly three dots
    regsub -all {[0-9]} $IP {} tmpStr
    if { $tmpStr != "..." } {
        return 0
    }
    # each numerical component is between 0 and 255
    foreach b [split $IP .] {
        if { [string length $b] == 0 } {
            return 0
        }
        scan $b %d b ;# allow for leading zeros which tcl thinks are octal
        if { $b < 0 || $b > 255 } {
           return 0
        }
    }
    return 1
}
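
# e.g. [ IPisValid 192.168.1.10 ] returns 1, while [ IPisValid 1.2.3 ]
# and [ IPisValid 192.168.1.256 ] return 0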


proc getBroadCast { ip netmask } {
##
## getBroadCast
## -----------
## Return the broadcast address given IP and netmask
##
## Usage:
##  getBroadCast ip netmask
## Arguments:
##  ip - valid IP address. can be a network address.
##  netmask - the netmask for this IP.
## Returns:
##  broadcast address in the form nnn.nnn.nnn.nnn
##
    if { ! [IPisValid $ip] } {
        error "IP is not valid"
    }
    if { ! [IPisValid $netmask] } {
        error "Netmask is not valid"
    }
    set hexIP [IPtoHex $ip]
    set hexNM [IPtoHex $netmask]
    set hexBC [broadcastAddress $hexIP $hexNM]
    set broadcastAddress [hexToIP $hexBC]
    return $broadcastAddress
}

proc getNetWork { ip netmask } {
##
## getNetWork
## -----------
## Return the network address given IP and netmask
##
## Usage:
##  getNetWork ip netmask
## Arguments:
##  ip - valid IP address. can be a network address.
##  netmask - the netmask for this IP.
## Returns:
##  network address in the form nnn.nnn.nnn.nnn
##
    if { ! [IPisValid $ip] } {
        error "IP is not valid"
    }
    if { ! [IPisValid $netmask] } {
        error "Netmask is not valid"
    }
    set hexIP [IPtoHex $ip]
    set hexNM [IPtoHex $netmask]
    set hexNW [networkAddress $hexIP $hexNM]
    set networkAddress [hexToIP $hexNW]
    return $networkAddress
}
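
# Usage sketch for the two wrappers above:
#	getBroadCast 192.168.1.10 255.255.255.0	;# -> 192.168.1.255
#	getNetWork 192.168.1.10 255.255.255.0	;# -> 192.168.1.0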