File: test.py

#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
from __future__ import print_function
import os
import sys
import time
import optparse
import subprocess
import threading
import signal
import xml.dom.minidom
import shutil
import re

from utils import get_list_from_file

try:
    import queue
except ImportError:
    import Queue as queue
#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration 
# cache which we may be interested in when deciding on which examples
# to run and how to run them.  These are set by waf during the 
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
    "NS3_ENABLED_MODULES",
    "NS3_MODULE_PATH",
    "NSC_ENABLED",
    "ENABLE_REAL_TIME",
    "ENABLE_THREADING",
    "ENABLE_EXAMPLES",
    "ENABLE_TESTS",
    "EXAMPLE_DIRECTORIES",
    "ENABLE_PYTHON_BINDINGS",
    "NSCLICK",
    "ENABLE_BRITE",
    "ENABLE_OPENFLOW",
    "APPNAME",
    "BUILD_PROFILE",
    "VERSION",
    "PYTHON",
    "VALGRIND_FOUND",
]

NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
NSCLICK = False
ENABLE_BRITE = False
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []
APPNAME = ""
BUILD_PROFILE = ""
BUILD_PROFILE_SUFFIX = ""
VERSION = ""
PYTHON = ""
VALGRIND_FOUND = True

#
# This will be given a prefix and a suffix when the waf config file is
# read.
#
test_runner_name = "test-runner"

#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build the pieces needed for those tests (for example, just the
# test-runner).
#
core_kinds = ["bvt", "core", "performance", "system", "unit"]

#
# There are some special cases for test suites that kill valgrind.  This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
    "routing-click",
    "lte-rr-ff-mac-scheduler",
    "lte-tdmt-ff-mac-scheduler",
    "lte-fdmt-ff-mac-scheduler",
    "lte-pf-ff-mac-scheduler",
    "lte-tta-ff-mac-scheduler",
    "lte-fdbet-ff-mac-scheduler",
    "lte-ttbet-ff-mac-scheduler",
    "lte-fdtbfq-ff-mac-scheduler",
    "lte-tdtbfq-ff-mac-scheduler",
    "lte-pss-ff-mac-scheduler",
]

# 
# There are some special cases for test suites that fail when NSC is
# missing.
#
core_nsc_missing_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
]

#
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
    examples_to_run_path,
    cpp_executable_dir,
    python_script_dir,
    example_tests,
    example_names_original,
    python_tests):

    # Check whether the examples-to-run file exists.
    if os.path.exists(examples_to_run_path):

        # Each tuple in the C++ list of examples to run contains
        #
        #     (example_name, do_run, do_valgrind_run)
        #
        # where example_name is the executable to be run, do_run is a
        # condition under which to run the example, and do_valgrind_run is
        # a condition under which to run the example under valgrind.  This
        # is needed because NSC causes illegal instruction crashes with
        # some tests when they are run under valgrind.
        #
        # Note that the two conditions are Python statements that
        # can depend on waf configuration variables.  For example,
        #
        #     ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
        #
        cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
        for example_name, do_run, do_valgrind_run in cpp_examples:

            # Separate the example name from its arguments.
            example_name_original = example_name
            example_name_parts = example_name.split(' ', 1)
            if len(example_name_parts) == 1:
                example_name      = example_name_parts[0]
                example_arguments = ""
            else:
                example_name      = example_name_parts[0]
                example_arguments = example_name_parts[1]

            # Add the proper prefix and suffix to the example name to
            # match what is done in the wscript file.
            example_path = "%s%s-%s%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE_SUFFIX)

            # Set the full path for the example.
            example_path = os.path.join(cpp_executable_dir, example_path)
            example_name = os.path.join(
                os.path.relpath(cpp_executable_dir, NS3_BUILDDIR),
                example_name)
            # Add all of the C++ examples that were built, i.e. found
            # in the directory, to the list of C++ examples to run.
            if os.path.exists(example_path):
                # Add any arguments to the path.
                if len(example_name_parts) != 1:
                    example_path = "%s %s" % (example_path, example_arguments)
                    example_name = "%s %s" % (example_name, example_arguments)

                # Add this example.
                example_tests.append((example_name, example_path, do_run, do_valgrind_run))
                example_names_original.append(example_name_original)
    
        # Each tuple in the Python list of examples to run contains
        #
        #     (example_name, do_run)
        #
        # where example_name is the Python script to be run and
        # do_run is a condition under which to run the example.
        #
        # Note that the condition is a Python statement that can
        # depend on waf configuration variables.  For example,
        #
        #     ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
        #
        python_examples = get_list_from_file(examples_to_run_path, "python_examples")
        for example_name, do_run in python_examples:
            # Separate the example name from its arguments.
            example_name_parts = example_name.split(' ', 1)
            if len(example_name_parts) == 1:
                example_name      = example_name_parts[0]
                example_arguments = ""
            else:
                example_name      = example_name_parts[0]
                example_arguments = example_name_parts[1]

            # Set the full path for the example.
            example_path = os.path.join(python_script_dir, example_name)

            # Add all of the Python examples that were found to the
            # list of Python examples to run.
            if os.path.exists(example_path):
                # Add any arguments to the path.
                if len(example_name_parts) != 1:
                    example_path = "%s %s" % (example_path, example_arguments)

                # Add this example.
                python_tests.append((example_path, do_run))
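
#
# For reference, an examples-to-run.py file simply defines the two lists that
# get_list_from_file() reads above.  A minimal sketch, with illustrative
# entries only (the conditions mirror the examples in the comments above):
#
#     cpp_examples = [
#         ("udp-echo", "True", "True"),
#         ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#     ]
#
#     python_examples = [
#         ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#     ]
#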

#
# The test suites are going to want to output status.  They are running
# concurrently.  This means that unless we are careful, the output of
# the test suites will be interleaved.  Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then 
# put together the final output file when we "join" the test tasks back
# to the main thread.  In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"

def read_test(test):
    result = test.find('Result').text
    name = test.find('Name').text
    if not test.find('Time') is None:
        time_real = test.find('Time').get('real')
    else:
        time_real = ''
    return (result, name, time_real)
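
#
# For reference, each <Test> element handled here is expected to look roughly
# like the following hand-written sketch (only the fields this script actually
# reads are shown):
#
#     <Test>
#       <Name>some-test-suite</Name>
#       <Result>PASS</Result>
#       <Time real="0.30"/>
#       <FailureDetails>
#         <Message>...</Message>
#         <Condition>...</Condition>
#         <Actual>...</Actual>
#         <Limit>...</Limit>
#         <File>...</File>
#         <Line>...</Line>
#       </FailureDetails>
#       <Test> ... nested test cases ... </Test>
#     </Test>
#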

#
# A simple example of writing a text file with a test result summary.  It is 
# expected that this output will be fine for developers looking for problems.
#
def node_to_text (test, f):
    (result, name, time_real) = read_test(test)
    output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time_real)
    f.write(output)
    for details in test.findall('FailureDetails'):
        f.write("    Details:\n")
        f.write("      Message:   %s\n" % details.find('Message').text)
        f.write("      Condition: %s\n" % details.find('Condition').text)
        f.write("      Actual:    %s\n" % details.find('Actual').text)
        f.write("      Limit:     %s\n" % details.find('Limit').text)
        f.write("      File:      %s\n" % details.find('File').text)
        f.write("      Line:      %s\n" % details.find('Line').text)
    for child in test.findall('Test'):
        node_to_text(child, f)

def translate_to_text(results_file, text_file):
    f = open(text_file, 'w')
    import xml.etree.ElementTree as ET
    et = ET.parse (results_file)
    for test in et.findall('Test'):
        node_to_text (test, f)

    for example in et.findall('Example'):
        result = example.find('Result').text
        name = example.find('Name').text
        if not example.find('Time') is None:
            time_real = example.find('Time').get('real')
        else:
            time_real = ''
        output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
        f.write(output)

    f.close()
    
#
# A simple example of writing an HTML file with a test result summary.  It is 
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it.  This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
    f = open(html_file, 'w')
    f.write("<html>\n")
    f.write("<body>\n")
    f.write("<center><h1>ns-3 Test Results</h1></center>\n")

    #
    # Read and parse the whole results file.
    #
    import xml.etree.ElementTree as ET
    et = ET.parse(results_file)

    #
    # Iterate through the test suites
    #
    f.write("<h2>Test Suites</h2>\n")
    for suite in et.findall('Test'):     
        #
        # For each test suite, get its name, result and execution time info
        #
        (result, name, time) = read_test (suite)

        # 
        # Print a level three header with the result, name and time.  If the 
        # test suite passed, the header is printed in green. If the suite was
        # skipped, print it in orange, otherwise assume something bad happened
        # and print in red.
        #
        if result == "PASS":
            f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
        elif result == "SKIP":
            f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
        else:
            f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))

        #
        # The test case information goes in a table.
        #
        f.write("<table border=\"1\">\n")

        #
        # The first column of the table has the heading Result
        #
        f.write("<th> Result </th>\n")

        #
        # If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it.  Looks like:
        #
        #   +--------+
        #   | Result |
        #   +--------+
        #   | CRASH  |
        #   +--------+
        #
        # Then go on to the next test suite.  Valgrind and skipped errors look the same.
        #
        if result in ["CRASH", "SKIP", "VALGR"]:
            f.write("<tr>\n")
            if result == "SKIP":
                f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
            else:
                f.write("<td style=\"color:red\">%s</td>\n" % result)
            f.write("</tr>\n")
            f.write("</table>\n")
            continue

        #
        # If the suite didn't crash, we expect more information, so fill out
        # the table heading row.  Like,
        #
        #   +--------+----------------+------+
        #   | Result | Test Case Name | Time |
        #   +--------+----------------+------+
        #
        f.write("<th>Test Case Name</th>\n")
        f.write("<th> Time </th>\n")

        #
        # If the test case failed, we need to print out some failure details
        # so extend the heading row again.  Like,
        #
        #   +--------+----------------+------+-----------------+
        #   | Result | Test Case Name | Time | Failure Details |
        #   +--------+----------------+------+-----------------+
        #
        if result == "FAIL":
            f.write("<th>Failure Details</th>\n")

        #
        # Now iterate through all of the test cases.
        #
        for case in suite.findall('Test'):

            #
            # Get the name, result and timing information from xml to use in
            # printing table below.
            #
            (result, name, time) = read_test(case)

            #
            # If the test case failed, we iterate through possibly multiple
            # failure details
            #
            if result == "FAIL":
                #
                # There can be multiple failures for each test case.  The first
                # row always gets the result, name and timing information along
                # with the failure details.  Remaining failures don't duplicate
                # this information but just get blanks for readability.  Like,
                #
                #   +--------+----------------+------+-----------------+
                #   | Result | Test Case Name | Time | Failure Details |
                #   +--------+----------------+------+-----------------+
                #   |  FAIL  | The name       | time | It's busted     |   
                #   +--------+----------------+------+-----------------+
                #   |        |                |      | Really broken   |   
                #   +--------+----------------+------+-----------------+
                #   |        |                |      | Busted bad      |   
                #   +--------+----------------+------+-----------------+
                #

                first_row = True
                for details in case.findall('FailureDetails'):

                    #
                    # Start a new row in the table for each possible Failure Detail
                    #
                    f.write("<tr>\n")

                    if first_row:
                        first_row = False
                        f.write("<td style=\"color:red\">%s</td>\n" % result)
                        f.write("<td>%s</td>\n" % name)
                        f.write("<td>%s</td>\n" % time)
                    else:
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")
                        f.write("<td></td>\n")

                    f.write("<td>")
                    f.write("<b>Message: </b>%s, " % details.find('Message').text)
                    f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
                    f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
                    f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
                    f.write("<b>File: </b>%s, " % details.find('File').text)
                    f.write("<b>Line: </b>%s" % details.find('Line').text)
                    f.write("</td>\n")
                    
                    #
                    # End the table row
                    #
                    f.write("</td>\n")
            else:
                #
                # If this particular test case passed, then we just print the PASS
                # result in green, followed by the test case name and its execution
                # time information.  These go off in <td> ... </td> table data.
                # The details table entry is left blank.
                #
                #   +--------+----------------+------+---------+
                #   | Result | Test Case Name | Time | Details |
                #   +--------+----------------+------+---------+
                #   |  PASS  | The name       | time |         |   
                #   +--------+----------------+------+---------+
                #
                f.write("<tr>\n")
                f.write("<td style=\"color:green\">%s</td>\n" % result)
                f.write("<td>%s</td>\n" % name)
                f.write("<td>%s</td>\n" % time)
                f.write("<td></td>\n")
                f.write("</tr>\n")
        #
        # All of the rows are written, so we need to end the table.
        #
        f.write("</table>\n")

    #
    # That's it for all of the test suites.  Now we have to do something about 
    # our examples.
    #
    f.write("<h2>Examples</h2>\n")

    #
    # Example status is rendered in a table just like the suites.
    #
    f.write("<table border=\"1\">\n")

    #
    # The table headings look like,
    #
    #   +--------+--------------+--------------+
    #   | Result | Example Name | Elapsed Time |
    #   +--------+--------------+--------------+
    #
    f.write("<th> Result </th>\n")
    f.write("<th>Example Name</th>\n")
    f.write("<th>Elapsed Time</th>\n")

    #
    # Now iterate through all of the examples
    #
    for example in et.findall("Example"):
        
        #
        # Start a new row for each example
        #
        f.write("<tr>\n")
        
        #
        # Get the result and name of the example in question
        #
        (result, name, time) = read_test(example)

        #
        # If the example either failed or crashed, print its result status
        # in red; otherwise green.  This goes in a <td> ... </td> table data
        #
        if result == "PASS":
            f.write("<td style=\"color:green\">%s</td>\n" % result)
        elif result == "SKIP":
            f.write("<td style=\"color:#ff6600\">%s</fd>\n" % result)
        else:
            f.write("<td style=\"color:red\">%s</td>\n" % result)

        #
        # Write the example name as a new tag data.
        #
        f.write("<td>%s</td>\n" % name)

        #
        # Write the elapsed time as a new tag data.
        #
        f.write("<td>%s</td>\n" % time)

        #
        # That's it for the current example, so terminate the row.
        #
        f.write("</tr>\n")

    #
    # That's it for the table of examples, so terminate the table.
    #
    f.write("</table>\n")

    #
    # And that's it for the report, so finish up.
    #
    f.write("</body>\n")
    f.write("</html>\n")
    f.close()
    
#
# Python Control-C handling is broken in the presence of multiple threads.  
# Signals get delivered to the runnable/running thread by default and if 
# it is blocked, the signal is simply ignored.  So we hook sigint and set 
# a global variable telling the system to shut down gracefully.
#
thread_exit = False

def sigint_hook(signal, frame):
    global thread_exit
    thread_exit = True
    return 0
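
#
# The hook above only takes effect once it is registered; somewhere in the
# start-up code this is expected to be done with something equivalent to:
#
#     signal.signal(signal.SIGINT, sigint_hook)
#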


#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner.  For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will 
# therefore not be included in the built test runner.
#
# Examples, however, are a different story.  In that case, we are just given
# a list of examples that could be run.  Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.  
#
# XXX This function pokes around in the waf internal state file.  To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
    for line in open(".lock-waf_" + sys.platform + "_build", "rt"):
        if line.startswith("top_dir ="):
            key, val = line.split('=')
            top_dir = eval(val.strip())
        if line.startswith("out_dir ="):
            key, val = line.split('=')
            out_dir = eval(val.strip())
    global NS3_BASEDIR
    NS3_BASEDIR = top_dir
    global NS3_BUILDDIR
    NS3_BUILDDIR = out_dir
    for line in open("%s/c4che/_cache.py" % out_dir).readlines():
        for item in interesting_config_items:
            if line.startswith(item):
                exec(line, globals())

    if options.verbose:
        for item in interesting_config_items:
            print("%s ==" % item, eval(item))

#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly.  The main thing 
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily ourselves, and we do so here.
#
# There can be many different ns-3 repositories on a system, and each has 
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
    have_DYLD_LIBRARY_PATH = False
    have_LD_LIBRARY_PATH = False
    have_PATH = False
    have_PYTHONPATH = False

    keys = list(os.environ.keys())
    for key in keys:
        if key == "DYLD_LIBRARY_PATH":
            have_DYLD_LIBRARY_PATH = True
        if key == "LD_LIBRARY_PATH":
            have_LD_LIBRARY_PATH = True
        if key == "PATH":
            have_PATH = True
        if key == "PYTHONPATH":
            have_PYTHONPATH = True

    # Compute the Python bindings path; PYTHONPATH itself is only set or
    # appended to below, so that any pre-existing value is preserved.
    pypath = os.path.join (NS3_BUILDDIR, "bindings", "python")

    if not have_PYTHONPATH:
        os.environ["PYTHONPATH"] = pypath
    else:
        os.environ["PYTHONPATH"] += ":" + pypath

    if options.verbose:
        print("os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"])

    if sys.platform == "darwin":
        if not have_DYLD_LIBRARY_PATH:
            os.environ["DYLD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["DYLD_LIBRARY_PATH"] += ":" + path
        if options.verbose:
            print("os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"])
    elif sys.platform == "win32":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ';' + path
        if options.verbose:
            print("os.environ[\"PATH\"] == %s" % os.environ["PATH"])
    elif sys.platform == "cygwin":
        if not have_PATH:
            os.environ["PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["PATH"] += ":" + path
        if options.verbose:
            print("os.environ[\"PATH\"] == %s" % os.environ["PATH"])
    else:
        if not have_LD_LIBRARY_PATH:
            os.environ["LD_LIBRARY_PATH"] = ""
        for path in NS3_MODULE_PATH:
            os.environ["LD_LIBRARY_PATH"] += ":" + str(path)
        if options.verbose:
            print("os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"])

#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions.  The easiest
# way to generate a suppression expression is by using the valgrind 
# --gen-suppressions option.  To do that you have to figure out how to run the 
# test in question.
#
# If you do "test.py -v -g -s <suitename>" then test.py will output most of what
# you need.  For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
#   ./test.py -v -g -s devices-mesh-dot11s-regression 
#
# You should see in the verbose output something that looks like:
#
#   Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug 
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
#   --tempdir=testpy-output/2010-01-12-22-47-50-CUT 
#   --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to 
# reproduce your error:
#
#   valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug 
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
#   --tempdir=testpy-output 
#
# Hint: Use the first part of the command as is, and point the "tempdir" to 
# somewhere real.  You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error.  The 
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind.  Use something like:
#
#   valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug 
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev 
#   --tempdir=testpy-output 
#
# Now when valgrind detects an error it will ask:
#
#   ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
#   {
#     <insert_a_suppression_name_here>
#     Memcheck:Addr8
#     fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
#     fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
#     fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
#     ...
#     the rest of the stack frame
#     ...
#   }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case).  The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need 
# all of that info.  For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame.  You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
#   {
#     Supress invalid read size errors in SendPreq() when using HwmpProtocolMac
#     Memcheck:Addr8
#     fun:*HwmpProtocolMac*SendPreq*
#   }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"

def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
    suppressions_path = os.path.join (NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE)

    if is_python:
        path_cmd = PYTHON[0] + " " + os.path.join (NS3_BASEDIR, shell_command)
    else:
        if len(build_path):
            path_cmd = os.path.join (build_path, shell_command)
        else:
            path_cmd = os.path.join (NS3_BUILDDIR, shell_command)

    if valgrind:
        cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path, 
            path_cmd)
    else:
        cmd = path_cmd

    if options.verbose:
        print("Synchronously execute %s" % cmd)

    start_time = time.time()
    proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_results, stderr_results = proc.communicate()
    elapsed_time = time.time() - start_time

    retval = proc.returncode
    try:
        stdout_results = stdout_results.decode()
    except UnicodeDecodeError:
        print("Non-decodable character in stdout output of %s" % cmd)
        print(stdout_results)
        retval = 1
    try:
        stderr_results = stderr_results.decode()
    except UnicodeDecodeError:
        print("Non-decodable character in stderr output of %s" % cmd)
        print(stderr_results)
        retval = 1

    #
    # valgrind sometimes has its own idea about what kind of memory management
    # errors are important.  We want to detect *any* leaks, so the way to do 
    # that is to look for the presence of a valgrind leak summary section.
    #
    # If another error has occurred (like a test suite has failed), we don't 
    # want to trump that error, so only do the valgrind output scan if the 
    # test has otherwise passed (return code was zero).
    #
    if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
        retval = 2
    
    if options.verbose:
        print("Return code = ", retval)
        print("stderr = ", stderr_results)

    return (retval, stdout_results, stderr_results, elapsed_time)
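
#
# A typical (hypothetical) invocation, mirroring the calls made from
# run_tests() below (note that the real runner name carries a version prefix
# and a build-profile suffix):
#
#     (rc, out, err, elapsed) = run_job_synchronously(
#         "utils/test-runner --print-test-name-list", os.getcwd(), False, False)
#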

#
# This class defines a unit of testing work.  It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
    def __init__(self):
        self.is_break = False
        self.is_skip = False
        self.is_example = False
        self.is_pyexample = False
        self.shell_command = ""
        self.display_name = ""
        self.basedir = ""
        self.tempdir = ""
        self.cwd = ""
        self.tmp_file_name = ""
        self.returncode = False
        self.elapsed_time = 0
        self.build_path = ""

    #
    # A job is either a standard job or a special job indicating that a worker
    # thread should exit.  This special job is indicated by setting is_break
    # to true.
    #
    def set_is_break(self, is_break):
        self.is_break = is_break

    #
    # If a job is to be skipped, we actually run it through the worker threads
    # to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
    #
    def set_is_skip(self, is_skip):
        self.is_skip = is_skip

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as 
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_example(self, is_example):
        self.is_example = is_example

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as 
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_pyexample(self, is_pyexample):
        self.is_pyexample = is_pyexample

    #
    # This is the shell command that will be executed in the job.  For example,
    #
    #  "utils/ns3-dev-test-runner-debug --test-name=some-test-suite"
    #
    def set_shell_command(self, shell_command):
        self.shell_command = shell_command

    #
    # This is the build path where ns-3 was built.  For example,
    #
    #  "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
    #
    def set_build_path(self, build_path):
        self.build_path = build_path

    #
    # This is the display name of the job, typically the test suite or example
    # name.  For example,
    #
    #  "some-test-suite" or "udp-echo"
    #
    def set_display_name(self, display_name):
        self.display_name = display_name

    #
    # This is the base directory of the repository out of which the tests are
    # being run.  It will be used deep down in the testing framework to determine
    # where the source directory of the test was, and therefore where to find 
    # provided test vectors.  For example,
    #
    #  "/home/user/repos/ns-3-dev"
    #
    def set_basedir(self, basedir):
        self.basedir = basedir

    #
    # This is the directory to which a running test suite should write any 
    # temporary files.
    #
    def set_tempdir(self, tempdir):
        self.tempdir = tempdir

    #
    # This is the current working directory that will be given to an executing
    # test as it is being run.  It will be used for examples to tell them where
    # to write all of the pcap files that we will be carefully ignoring.  For
    # example,
    #
    #  "/tmp/unchecked-traces"
    #
    def set_cwd(self, cwd):
        self.cwd = cwd

    #
    # This is the temporary results file name that will be given to an executing 
    # test as it is being run.  We will be running all of our tests in parallel
    # so there must be multiple temporary output files.  These will be collected
    # into a single XML file at the end and then be deleted.  
    #
    def set_tmp_file_name(self, tmp_file_name):
        self.tmp_file_name = tmp_file_name

    #
    # The return code received when the job process is executed.
    #
    def set_returncode(self, returncode):
        self.returncode = returncode

    #
    # The elapsed real time for the job execution.
    #
    def set_elapsed_time(self, elapsed_time):
        self.elapsed_time = elapsed_time
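
#
# A sketch of how a Job for a test suite is typically filled in before being
# handed to a worker thread (the values shown are illustrative only):
#
#     job = Job()
#     job.set_display_name("some-test-suite")
#     job.set_shell_command("utils/ns3-dev-test-runner-debug --test-name=some-test-suite")
#     job.set_basedir(os.getcwd())
#     job.set_tempdir(testpy_output_dir)
#     job.set_tmp_file_name(os.path.join(testpy_output_dir, "some-test-suite.xml"))
#     input_queue.put(job)
#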

#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
    def __init__(self, input_queue, output_queue):
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            job = self.input_queue.get()
            #
            # Worker threads continue running until explicitly told to stop with
            # a special job.
            #
            if job.is_break:
                return
            #
            # If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back that a "break" in
            # the normal command processing has happened.
            #
            if thread_exit == True:
                job.set_is_break(True)
                self.output_queue.put(job)
                continue

            #
            # If we are actually supposed to skip this job, do so.  Note that
            # if is_skip is true, returncode is undefined.
            #
            if job.is_skip:
                if options.verbose:
                    print("Skip %s" % job.shell_command)
                self.output_queue.put(job)
                continue

            #
            # Otherwise go about the business of running tests as normal.
            #
            else:
                if options.verbose:
                    print("Launch %s" % job.shell_command)

                if job.is_example or job.is_pyexample:
                    #
                    # If we have an example, the shell command is all we need to
                    # know.  It will be something like "examples/udp/udp-echo" or 
                    # "examples/wireless/mixed-wireless.py"
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command, 
                        job.cwd, options.valgrind, job.is_pyexample, job.build_path)
                else:
                    #
                    # If we're a test suite, we need to provide a little more info
                    # to the test runner, specifically the base directory and temp
                    # file name
                    #
                    if options.update_data:
                        update_data = '--update-data'
                    else:
                        update_data = ''
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command + 
                        " --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data), 
                        job.cwd, options.valgrind, False)

                job.set_elapsed_time(et)

                if options.verbose:
                    print("returncode = %d" % job.returncode)
                    print("---------- begin standard out ----------")
                    print(standard_out)
                    print("---------- begin standard err ----------")
                    print(standard_err)
                    print("---------- end standard err ----------")

                self.output_queue.put(job)

#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
    #
    # Pull some interesting configuration information out of waf, primarily
    # so we can know where executables can be found, but also to tell us what
    # pieces of the system have been built.  This will tell us what examples 
    # are runnable.
    #
    read_waf_config()

    #
    # Set the proper suffix.
    #
    global BUILD_PROFILE_SUFFIX
    if BUILD_PROFILE == 'release': 
        BUILD_PROFILE_SUFFIX = ""
    else:
        BUILD_PROFILE_SUFFIX = "-" + BUILD_PROFILE

    #
    # Add the proper prefix and suffix to the test-runner name to
    # match what is done in the wscript file.
    #
    test_runner_name = "%s%s-%s%s" % (APPNAME, VERSION, "test-runner", BUILD_PROFILE_SUFFIX)

    #
    # Run waf to make sure that everything is built, configured and ready to go
    # unless we are explicitly told not to.  We want to be careful about causing
    # our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
    #
    if not options.nowaf:

        #
        # If the user is running the "kinds" or "list" options, there is an 
        # implied dependency on the test-runner since we call that program
        # if those options are selected.  We will exit after processing those
        # options, so if we see them, we can safely only build the test-runner.
        #
        # If the user has constrained us to running only a particular type of
        # file, we can only ask waf to build what we know will be necessary.
        # For example, if the user only wants to run BVT tests, we only have
        # to build the test-runner and can ignore all of the examples.
        #
        # If the user only wants to run a single example, then we can just build
        # that example.
        #
        # If there is no constraint, then we have to build everything since the
        # user wants to run everything.
        #
        # Note: the waf command is currently identical on Windows (win32) and
        # other platforms, so no platform-specific branching is needed here.
        if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
            waf_cmd = sys.executable + " waf --target=test-runner"
        elif len(options.example):
            waf_cmd = sys.executable + " waf --target=%s" % os.path.basename(options.example)
        else:
            waf_cmd = sys.executable + " waf"

        if options.verbose:
            print("Building: %s" % waf_cmd)

        proc = subprocess.Popen(waf_cmd, shell = True)
        proc.communicate()
        if proc.returncode:
            print("Waf died. Not running tests", file=sys.stderr)
            return proc.returncode


    #
    # Dynamically set up paths.
    #
    make_paths()

    #
    # Get the information from the build status file.
    #
    build_status_file = os.path.join (NS3_BUILDDIR, 'build-status.py')
    if os.path.exists(build_status_file):
        ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
        ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
    else:
        print('The build status file was not found.  You must do waf build before running test.py.', file=sys.stderr)
        sys.exit(2)

    #
    # Make a dictionary that maps the name of a program to its path.
    #
    ns3_runnable_programs_dictionary = {}
    for program in ns3_runnable_programs:
        # Remove any directory names from path.
        program_name = os.path.basename(program)
        ns3_runnable_programs_dictionary[program_name] = program
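
    # For reference, build-status.py is expected to contain two plain Python
    # lists, roughly like the following (the entries are illustrative only):
    #
    #     ns3_runnable_programs = ['build/utils/ns3-dev-test-runner-debug']
    #     ns3_runnable_scripts = ['realtime-udp-echo.py']
    #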

    # Generate the lists of examples to run as smoke tests in order to
    # ensure that they remain buildable and runnable over time.
    #
    example_tests = []
    example_names_original = []
    python_tests = []
    for directory in EXAMPLE_DIRECTORIES:
        # Set the directories and paths for this example. 
        example_directory   = os.path.join("examples", directory)
        examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
        cpp_executable_dir   = os.path.join(NS3_BUILDDIR, example_directory)
        python_script_dir    = os.path.join(example_directory)

        # Parse this example directory's file.
        parse_examples_to_run_file(
            examples_to_run_path,
            cpp_executable_dir,
            python_script_dir,
            example_tests,
            example_names_original,
            python_tests)

    for module in NS3_ENABLED_MODULES:
        # Remove the "ns3-" from the module name.
        module = module[len("ns3-"):]

        # Set the directories and paths for this example. 
        module_directory     = os.path.join("src", module)
        example_directory    = os.path.join(module_directory, "examples")
        examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
        cpp_executable_dir   = os.path.join(NS3_BUILDDIR, example_directory)
        python_script_dir    = os.path.join(example_directory)

        # Parse this module's file.
        parse_examples_to_run_file(
            examples_to_run_path,
            cpp_executable_dir,
            python_script_dir,
            example_tests,
            example_names_original,
            python_tests)

    #
    # If lots of logging is enabled, we can crash Python when it tries to 
    # save all of the text.  We just don't allow logging to be turned on when
    # test.py runs.  If you want to see logging output from your tests, you
    # have to run them using the test-runner directly.
    #
    os.environ["NS_LOG"] = ""

    #
    # There are a couple of options that allow us to exit before starting
    # up a bunch of threads and running tests.  Let's detect these cases and 
    # handle them without doing all of the hard work.
    #
    if options.kinds:
        path_cmd = os.path.join("utils", test_runner_name + " --print-test-type-list")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(standard_out, bytes):
            standard_out = standard_out.decode()
        print(standard_out)

    if options.list:
        if len(options.constrain):
            path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types --test-type=%s" % options.constrain)
        else:
            path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types")
        (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if rc != 0:
            # This is usually a sign that ns-3 crashed or exited uncleanly
            print(('test.py error:  test-runner return code returned {}'.format(rc)))
            print(('To debug, try running {}\n'.format('\'./waf --run \"test-runner --print-test-name-list\"\'')))
            return
        if isinstance(standard_out, bytes):
            standard_out = standard_out.decode()
        list_items = standard_out.split('\n')
        list_items.sort()
        print("Test Type    Test Name")
        print("---------    ---------")
        for item in list_items:
            if len(item.strip()):
                print(item)
        example_names_original.sort()
        for item in example_names_original:
                print("example     ", item)
        print()

    if options.kinds or options.list:
        return

    #
    # We communicate results in two ways.  First, a simple message relating 
    # PASS, FAIL, CRASH or SKIP is always written to the standard output.  It 
    # is expected that this will be one of the main use cases.  A developer can
    # just run test.py with no options and see that all of the tests still 
    # pass.
    #
    # The second main use case is when detailed status is requested (with the
    # --text or --html options).  Typically this will be text if a developer
    # finds a problem, or HTML for nightly builds.  In these cases, an
    # XML file is written containing the status messages from the test suites.
    # This file is then read and translated into text or HTML.  It is expected
    # that nobody will really be interested in the XML, so we write it somewhere
    # with a unique name (time) to avoid collisions.  In case an error happens, we
    # provide a runtime option to retain the temporary files.
    #
    # When we run examples as smoke tests, they are going to want to create
    # lots and lots of trace files.  We aren't really interested in the contents
    # of the trace files, so we also just stash them off in the temporary dir.
    # The retain option also causes these unchecked trace files to be kept.
    #
    date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())

    if not os.path.exists(TMP_OUTPUT_DIR):
        os.makedirs(TMP_OUTPUT_DIR)

    testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time);

    if not os.path.exists(testpy_output_dir):
        os.makedirs(testpy_output_dir)

    #
    # Create the main output file and start filling it with XML.  We need to 
    # do this since the tests will just append individual results to this file.
    #
    xml_results_file = os.path.join(testpy_output_dir, "results.xml")
    f = open(xml_results_file, 'w')
    f.write('<?xml version="1.0"?>\n')
    f.write('<Results>\n')
    f.close()

    #
    # We need to figure out what test suites to execute.  We are either given one 
    # suite or example explicitly via the --suite or --example/--pyexample option,
    # or we need to call into the test runner and ask it to list all of the available
    # test suites.  Further, we need to provide the constraint information if it
    # has been given to us.
    # 
    # This translates into allowing the following options with respect to the 
    # suites
    #
    #  ./test.py:                                           run all of the suites and examples
    #  ./test.py --constrain=core:                          run all of the suites of all kinds
    #  ./test.py --constrain=unit:                          run all unit suites
    #  ./test.py --suite=some-test-suite:                   run a single suite
    #  ./test.py --example=examples/udp/udp-echo:           run single example
    #  ./test.py --pyexample=examples/wireless/mixed-wireless.py:  run python example
    #  ./test.py --suite=some-suite --example=some-example: run the single suite
    #
    # We can also use the --constrain option to provide an ordering of test 
    # execution quite easily.
    #

    # Flag indicating a specific suite was explicitly requested
    single_suite = False
    
    if len(options.suite):
        # See if this is a valid test suite.
        path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
        (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(suites, bytes):
            suites = suites.decode()
        if options.suite in suites.split('\n'):
            suites = options.suite + "\n"
            single_suite = True
        else:
            print('The test suite was not run because an unknown test suite name was requested.', file=sys.stderr)
            sys.exit(2)

    elif len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain):
            path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % options.constrain)
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        else:
            path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
            (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
    else:
        suites = ""

    #
    # suite_list will either be a single test suite name that the user has
    # indicated she wants to run, or a list of test suites provided by
    # the test-runner, possibly filtered according to user-provided constraints.
    # We go through the trouble of setting up the parallel execution 
    # even in the case of a single suite to avoid having to process the
    # results in two different places.
    #
    if isinstance(suites, bytes):
        suites = suites.decode()
    suite_list = suites.split('\n')
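    # Since the runner's output (and the single-suite string built above) ends
    # with a newline, the split usually leaves a trailing empty string, e.g.
    #
    #   'suite-a\nsuite-b\n'.split('\n') == ['suite-a', 'suite-b', '']
    #
    # The dispatch loop below skips such empty entries.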

    #
    # Performance tests should only be run when they are requested,
    # i.e. they are not run by default in test.py.
    # If a specific suite was requested we run it, even if
    # it is a performance test.
    #
    if not single_suite and options.constrain != 'performance':

        # Get a list of all of the performance tests.
        path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % "performance")
        (rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
        if isinstance(performance_tests, bytes):
            performance_tests = performance_tests.decode()
        performance_test_list = performance_tests.split('\n')

        # Remove any performance tests from the suites list.
        for performance_test in performance_test_list:
            if performance_test in suite_list:
                suite_list.remove(performance_test)

    #
    # We now have a possibly large number of test suites to run, so we want to
    # run them in parallel.  We're going to spin up a number of worker threads
    # that will run our test jobs for us.
    #
    input_queue = queue.Queue(0)
    output_queue = queue.Queue(0)

    jobs = 0
    threads = []

    #
    # In Python 2.6 you can just use the multiprocessing module, but we don't
    # want to introduce that dependency yet, so we jump through a few hoops.
    #
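    # A minimal sketch of what that would look like on Python 2.6 or later
    # (not used here, to keep the dependency out):
    #
    #     import multiprocessing
    #     processors = multiprocessing.cpu_count()
    #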
    processors = 1

    if sys.platform != "win32":
        if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
            processors = os.sysconf('SC_NPROCESSORS_ONLN')
        else:
            proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_results, stderr_results = proc.communicate()
            stdout_results = stdout_results.decode()
            stderr_results = stderr_results.decode()
            if len(stderr_results) == 0:
                processors = int(stdout_results)

    #
    # Now, spin up one thread per processor which will eventually mean one test
    # per processor running concurrently.
    #
    for i in range(processors):
        thread = worker_thread(input_queue, output_queue)
        threads.append(thread)
        thread.start()

    #
    # Keep track of some summary statistics
    #
    total_tests = 0
    skipped_tests = 0
    skipped_testnames = []

    #
    # We now have worker threads spun up, and a list of work to do.  So, run 
    # through the list of test suites and dispatch a job to run each one.
    # 
    # Dispatching will run with unlimited speed and the worker threads will 
    # execute as fast as possible from the queue.
    #
    # Note that we actually dispatch tests to be skipped, so all of the 
    # PASS, FAIL, CRASH and SKIP processing is done in the same place.
    #
    for test in suite_list:
        test = test.strip()
        if len(test):
            job = Job()
            job.set_is_example(False)
            job.set_is_pyexample(False)
            job.set_display_name(test)
            job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
            job.set_cwd(os.getcwd())
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            if options.multiple:
                multiple = ""
            else:
                multiple = " --stop-on-failure"
            if len(options.fullness):
                fullness = options.fullness.upper()
                fullness = " --fullness=%s" % fullness
            else:
                fullness = " --fullness=QUICK"

            path_cmd = os.path.join("utils", test_runner_name + " --test-name=%s%s%s" % (test, multiple, fullness))

            job.set_shell_command(path_cmd)
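            # With the default options this builds a command roughly like
            #
            #   utils/<test_runner_name> --test-name=<suite> --stop-on-failure --fullness=QUICK
            #
            # where <suite> is the current entry from suite_list.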

            if options.valgrind and test in core_valgrind_skip_tests:
                job.set_is_skip(True)

            # Skip tests that will fail if NSC is missing.
            if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
                job.set_is_skip(True)

            if options.verbose:
                print("Queue %s" % test)

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1
    
    #
    # We've taken care of the discovered or specified test suites.  Now we
    # have to deal with examples run as smoke tests.  We have a list of all of
    # the example programs it makes sense to try and run.  Each example will
    # have a condition associated with it that must evaluate to true for us
    # to try and execute it.  This is used to determine if the example has
    # a dependency that is not satisfied.  For example, if an example depends
    # on NSC being configured by waf, that example should have a condition
    # that evaluates to true if NSC is enabled.  For example,
    #
    #      ("tcp-nsc-zoo", "NSC_ENABLED == True"),
    #
    # In this case, the example "tcp-nsc-zoo" will only be run if we find the
    # waf configuration variable "NSC_ENABLED" to be True.
    #
    # We don't care at all how the trace files come out, so we just write them 
    # to a single temporary directory.
    #
    # XXX As it stands, all of the trace files have unique names, and so file
    # collisions can only happen if two instances of an example are running in
    # two versions of the test.py process concurrently.  We may want to create
    # uniquely named temporary traces directories to avoid this problem.
    #
    # We need to figure out what examples to execute.  We are either given one 
    # suite or example explicitly via the --suite or --example option, or we
    # need to walk the list of examples looking for available example 
    # conditions.
    #
    # This translates into allowing the following options with respect to the 
    # suites
    #
    #  ./test.py:                                           run all of the examples
    #  ./test.py --constrain=unit                           run no examples
    #  ./test.py --constrain=example                        run all of the examples
    #  ./test.py --suite=some-test-suite:                   run no examples
    #  ./test.py --example=some-example:                    run the single example
    #  ./test.py --suite=some-suite --example=some-example: run the single example
    #
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "example":
            if ENABLE_EXAMPLES:
                for name, test, do_run, do_valgrind_run in example_tests:
                    # Remove any arguments and directory names from test.
                    test_name = test.split(' ', 1)[0] 
                    test_name = os.path.basename(test_name)

                    # Don't try to run this example if it isn't runnable.
                    if test_name in ns3_runnable_programs_dictionary:
                        if eval(do_run):
                            job = Job()
                            job.set_is_example(True)
                            job.set_is_pyexample(False)
                            job.set_display_name(name)
                            job.set_tmp_file_name("")
                            job.set_cwd(testpy_output_dir)
                            job.set_basedir(os.getcwd())
                            job.set_tempdir(testpy_output_dir)
                            job.set_shell_command(test)
                            job.set_build_path(options.buildpath)

                            if options.valgrind and not eval(do_valgrind_run):
                                job.set_is_skip (True)

                            if options.verbose:
                                print("Queue %s" % test)

                            input_queue.put(job)
                            jobs = jobs + 1
                            total_tests = total_tests + 1

    elif len(options.example):
        # Add the proper prefix and suffix to the example name to
        # match what is done in the wscript file.
        example_name = "%s%s-%s%s" % (APPNAME, VERSION, options.example, BUILD_PROFILE_SUFFIX)

        # Don't try to run this example if it isn't runnable.
        if example_name not in ns3_runnable_programs_dictionary:
            print("Example %s is not runnable." % example_name)
        else:
            #
            # If you tell me to run an example, I will try and run the example
            # irrespective of any condition.
            #
            example_path = ns3_runnable_programs_dictionary[example_name]
            example_path = os.path.abspath(example_path)
            job = Job()
            job.set_is_example(True)
            job.set_is_pyexample(False)
            job.set_display_name(example_path)
            job.set_tmp_file_name("")
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(example_path)
            job.set_build_path(options.buildpath)

            if options.verbose:
                print("Queue %s" % example_name)

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1

    #
    # Run some Python examples as smoke tests.  We have a list of all of
    # the example programs it makes sense to try and run.  Each example will
    # have a condition associated with it that must evaluate to true for us
    # to try and execute it.  This is used to determine if the example has
    # a dependency that is not satisfied.
    #
    # We don't care at all how the trace files come out, so we just write them 
    # to a single temporary directory.
    #
    # We need to figure out what python examples to execute.  We are either 
    # given one pyexample explicitly via the --pyexample option, or we
    # need to walk the list of python examples
    #
    # This translates into allowing the following options with respect to the 
    # suites
    #
    #  ./test.py --constrain=pyexample           run all of the python examples
    #  ./test.py --pyexample=some-example.py:    run the single python example
    #
    if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
        if len(options.constrain) == 0 or options.constrain == "pyexample":
            if ENABLE_EXAMPLES:
                for test, do_run in python_tests:
                    # Remove any arguments and directory names from test.
                    test_name = test.split(' ', 1)[0] 
                    test_name = os.path.basename(test_name)

                    # Don't try to run this example if it isn't runnable.
                    if test_name in ns3_runnable_scripts:
                        if eval(do_run):
                            job = Job()
                            job.set_is_example(False)
                            job.set_is_pyexample(True)
                            job.set_display_name(test)
                            job.set_tmp_file_name("")
                            job.set_cwd(testpy_output_dir)
                            job.set_basedir(os.getcwd())
                            job.set_tempdir(testpy_output_dir)
                            job.set_shell_command(test)
                            job.set_build_path("")

                            #
                            # Python programs and valgrind do not work and play
                            # well together, so we skip them under valgrind.
                            # We go through the trouble of doing all of this
                            # work to report the skipped tests in a consistent
                            # way through the output formatter.
                            #
                            if options.valgrind:
                                job.set_is_skip (True)

                            #
                            # The user can disable python bindings, so we need
                            # to pay attention to that and give some feedback
                            # that we're not testing them
                            #
                            if not ENABLE_PYTHON_BINDINGS:
                                job.set_is_skip (True)

                            if options.verbose:
                                print("Queue %s" % test)

                            input_queue.put(job)
                            jobs = jobs + 1
                            total_tests = total_tests + 1

    elif len(options.pyexample):
        # Don't try to run this example if it isn't runnable.
        example_name = os.path.basename(options.pyexample)
        if example_name not in ns3_runnable_scripts:
            print("Example %s is not runnable." % example_name)
        else:
            #
            # If you tell me to run a python example, I will try and run the example
            # irrespective of any condition.
            #
            job = Job()
            job.set_is_pyexample(True)
            job.set_display_name(options.pyexample)
            job.set_tmp_file_name("")
            job.set_cwd(testpy_output_dir)
            job.set_basedir(os.getcwd())
            job.set_tempdir(testpy_output_dir)
            job.set_shell_command(options.pyexample)
            job.set_build_path("")

            if options.verbose:
                print("Queue %s" % options.pyexample)

            input_queue.put(job)
            jobs = jobs + 1
            total_tests = total_tests + 1

    #
    # Tell the worker threads to pack up and go home for the day.  Each one
    # will exit when it sees its is_break task.
    #
    for i in range(processors):
        job = Job()
        job.set_is_break(True)
        input_queue.put(job)
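    # This is the usual sentinel ("poison pill") shutdown: because each worker
    # stops consuming once it sees an is_break job, queueing one sentinel per
    # worker ensures every thread dequeues exactly one and exits.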

    #
    # Now all of the tests have been dispatched, so all we have to do here
    # in the main thread is to wait for them to complete.  Keyboard interrupt
    # handling is broken as mentioned above.  We use a signal handler to catch
    # sigint and set a global variable.  When the worker threads sense this
    # they stop doing real work and will just start throwing jobs back at us
    # with is_break set to True.  In this case, there are no real results so we 
    # ignore them.  If there are real results, we always print PASS or FAIL to
    # standard out as a quick indication of what happened.
    #
    passed_tests = 0
    failed_tests = 0
    failed_testnames = []
    crashed_tests = 0
    crashed_testnames = []
    valgrind_errors = 0
    valgrind_testnames = []
    for i in range(jobs):
        job = output_queue.get()
        if job.is_break:
            continue

        if job.is_example or job.is_pyexample:
            kind = "Example"
        else:
            kind = "TestSuite"

        if job.is_skip:
            status = "SKIP"
            skipped_tests = skipped_tests + 1
            skipped_testnames.append(job.display_name)
        else:
            if job.returncode == 0:
                status = "PASS"
                passed_tests = passed_tests + 1
            elif job.returncode == 1:
                failed_tests = failed_tests + 1
                failed_testnames.append(job.display_name)
                status = "FAIL"
            elif job.returncode == 2:
                valgrind_errors = valgrind_errors + 1
                valgrind_testnames.append(job.display_name)
                status = "VALGR"
            else:
                crashed_tests = crashed_tests + 1
                crashed_testnames.append(job.display_name)
                status = "CRASH"

        if options.duration or options.constrain == "performance":
            print("%s (%.3f): %s %s" % (status, job.elapsed_time, kind, job.display_name))
        else:
            print("%s: %s %s" % (status, kind, job.display_name))

        if job.is_example or job.is_pyexample:
            #
            # Examples are the odd man out here.  They are written without any
            # knowledge that they are going to be run as a test, so we need to 
            # cook up some kind of output for them.  We're writing an xml file,
            # so we do some simple XML that says we ran the example.
            #
            # XXX We could add some timing information to the examples, i.e. run
            # them through time and print the results here.
            #
            f = open(xml_results_file, 'a')
            f.write('<Example>\n')
            example_name = "  <Name>%s</Name>\n" % job.display_name
            f.write(example_name)

            if status == "PASS":
                f.write('  <Result>PASS</Result>\n')
            elif status == "FAIL":
                f.write('  <Result>FAIL</Result>\n')
            elif status == "VALGR":
                f.write('  <Result>VALGR</Result>\n')
            elif status == "SKIP":
                f.write('  <Result>SKIP</Result>\n')
            else:
                f.write('  <Result>CRASH</Result>\n')

            f.write('  <Time real="%.3f"/>\n' % job.elapsed_time)
            f.write('</Example>\n')
            f.close()
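            # The appended fragment therefore looks like, for instance
            # (example name and time illustrative):
            #
            #   <Example>
            #     <Name>udp-echo</Name>
            #     <Result>PASS</Result>
            #     <Time real="1.234"/>
            #   </Example>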

        else:
            #
            # If we're not running an example, we're running a test suite.
            # These puppies are running concurrently and generating output
            # that was written to a temporary file to avoid collisions.
            #
            # Now that we are executing sequentially in the main thread, we can
            # concatenate the contents of the associated temp file to the main 
            # results file and remove that temp file.
            #
            # One thing to consider is that a test suite can crash just as
            # well as any other program, so we need to deal with that 
            # possibility as well.  If it ran correctly it will return 0
            # if it passed, or 1 if it failed.  In this case, we can count
            # on the results file it saved being complete.  If it crashed, it 
            # will return some other code, and the file should be considered 
            # corrupt and useless.  If the suite didn't create any XML, then
            # we're going to have to do it ourselves.
            #
            # Another issue is how to deal with a valgrind error.  If we run
            # a test suite under valgrind and it passes, we will get a return
            # code of 0 and there will be a valid xml results file since the code
            # ran to completion.  If we get a return code of 1 under valgrind,
            # the test case failed, but valgrind did not find any problems so the
            # test case return code was passed through.  We will have a valid xml
            # results file here as well since the test suite ran.  If we see a 
            # return code of 2, this means that valgrind found an error (we asked
            # it to return 2 if it found a problem in run_job_synchronously) but
            # the suite ran to completion so there is a valid xml results file.
            # If the suite crashes under valgrind we will see some other error 
            # return code (like 139).  If valgrind finds an illegal instruction or
            # some other strange problem, it will die with its own strange return
            # code (like 132).  However, if the test crashes by itself, not under
            # valgrind we will also see some other return code.
            #
            # If the return code is 0, 1, or 2, we have a valid xml file.  If we 
            # get another return code, we have no xml and we can't really say what
            # happened -- maybe the TestSuite crashed, maybe valgrind crashed due
            # to an illegal instruction.  If we get something beside 0-2, we assume
            # a crash and fake up an xml entry.  After this is all done, we still
            # need to indicate a valgrind error somehow, so we fake up an xml entry
            # with a VALGR result.  Thus, in the case of a working TestSuite that
            # fails valgrind, we'll see the PASS entry for the working TestSuite
            # followed by a VALGR failing test suite of the same name.
            #
            if job.is_skip:
                f = open(xml_results_file, 'a')
                f.write("<Test>\n")
                f.write("  <Name>%s</Name>\n" % job.display_name)
                f.write('  <Result>SKIP</Result>\n')
                f.write("</Test>\n")
                f.close()
            else:
                if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
                    f_to = open(xml_results_file, 'a')
                    f_from = open(job.tmp_file_name)
                    f_to.write(f_from.read())
                    f_to.close()
                    f_from.close()
                else:
                    f = open(xml_results_file, 'a')
                    f.write("<Test>\n")
                    f.write("  <Name>%s</Name>\n" % job.display_name)
                    f.write('  <Result>CRASH</Result>\n')
                    f.write("</Test>\n")
                    f.close()

                # A valgrind failure gets its own VALGR entry in addition to
                # whatever the suite itself reported (see the comment above).
                if job.returncode == 2:
                    f = open(xml_results_file, 'a')
                    f.write("<Test>\n")
                    f.write("  <Name>%s</Name>\n" % job.display_name)
                    f.write('  <Result>VALGR</Result>\n')
                    f.write("</Test>\n")
                    f.close()

    #
    # We have all of the tests run and the results written out.  One final 
    # bit of housekeeping is to wait for all of the threads to close down
    # so we can exit gracefully.
    #
    for thread in threads:
        thread.join()
    
    #
    # Back at the beginning of time, we started the body of an XML document
    # since the test suites and examples were going to just write their 
    # individual pieces.  So, we need to finish off and close out the XML 
    # document
    #
    f = open(xml_results_file, 'a')
    f.write('</Results>\n')
    f.close()

    #
    # Print a quick summary of events
    #
    print("%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests, 
        total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors))
    #
    # Repeat summary of skipped, failed, crashed, valgrind events 
    #
    if skipped_testnames:
        skipped_testnames.sort()
        print('List of SKIPped tests:\n    %s' % '\n    '.join(map(str, skipped_testnames)))
    if failed_testnames:
        failed_testnames.sort()
        print('List of FAILed tests:\n    %s' % '\n    '.join(map(str, failed_testnames)))
    if crashed_testnames:
        crashed_testnames.sort()
        print('List of CRASHed tests:\n    %s' % '\n    '.join(map(str, crashed_testnames)))
    if valgrind_testnames:
        valgrind_testnames.sort()
        print('List of VALGR failures:\n    %s' % '\n    '.join(map(str, valgrind_testnames)))
    #
    # The last things to do are to translate the XML results file to "human
    # readable form" if the user asked for it (or make an XML file somewhere)
    #
    if len(options.html):
        translate_to_html(xml_results_file, options.html)

    if len(options.text):
        translate_to_text(xml_results_file, options.text)

    if len(options.xml):
        shutil.copyfile(xml_results_file, options.xml)

    #
    # Let the user know if they need to turn on tests or examples.
    #
    if not ENABLE_TESTS or not ENABLE_EXAMPLES:
        print()
        if not ENABLE_TESTS:
            print('***  Note: ns-3 tests are currently disabled. Enable them by adding')
            print('***  "--enable-tests" to ./waf configure or modifying your .ns3rc file.')
            print()
        if not ENABLE_EXAMPLES:
            print('***  Note: ns-3 examples are currently disabled. Enable them by adding')
            print('***  "--enable-examples" to ./waf configure or modifying your .ns3rc file.')
            print()

    #
    # Let the user know if they tried to use valgrind but it was not
    # present on their machine.
    #
    if options.valgrind and not VALGRIND_FOUND:
        print()
        print('***  Note: you are trying to use valgrind, but valgrind could not be found')
        print('***  on your machine.  All tests and examples will crash or be skipped.')
        print()

    #
    # If we have been asked to retain all of the little temporary files, we
    # don't delete them.  If we do delete the temporary files, delete only the
    # directory we just created.  We don't want to happily delete any retained
    # directories, which would probably surprise the user.
    #
    if not options.retain:
        shutil.rmtree(testpy_output_dir)

    if passed_tests + skipped_tests == total_tests:
        return 0 # success
    else:
        return 1 # catchall for general errors

def main(argv):
    parser = optparse.OptionParser()
    parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
                      metavar="BUILDPATH",
                      help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")

    parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
                      metavar="KIND",
                      help="constrain the test-runner by kind of test")

    parser.add_option("-d", "--duration", action="store_true", dest="duration", default=False,
                      help="print the duration of each test suite and example")

    parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
                      metavar="EXAMPLE",
                      help="specify a single example to run (no relative path is needed)")

    parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
                      help="If examples use reference data files, get them to re-generate them")

    parser.add_option("-f", "--fullness", action="store", type="string", dest="fullness", default="QUICK",
                      metavar="FULLNESS",
                      help="choose the duration of tests to run: QUICK, EXTENSIVE, or TAKES_FOREVER, where EXTENSIVE includes QUICK and TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK tests are run by default)")

    parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
                      help="run the test suites and examples using valgrind")

    parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
                      help="print the kinds of tests available")

    parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
                      help="print the list of known tests")

    parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
                      help="report multiple failures from test suites and test cases")

    parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
                      help="do not run waf before starting testing")

    parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
                      metavar="PYEXAMPLE",
                      help="specify a single python example to run (with relative path)")

    parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
                      help="retain all temporary files (which are normally deleted)")

    parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
                      metavar="TEST-SUITE",
                      help="specify a single test suite to run")

    parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
                      metavar="TEXT-FILE",
                      help="write detailed test results into TEXT-FILE.txt")

    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
                      help="print progress and informational messages")

    parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
                      metavar="HTML-FILE",
                      help="write detailed test results into HTML-FILE.html")

    parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
                      metavar="XML-FILE",
                      help="write detailed test results into XML-FILE.xml")

    global options
    options = parser.parse_args()[0]
    signal.signal(signal.SIGINT, sigint_hook)

    return run_tests()

if __name__ == '__main__':
    sys.exit(main(sys.argv))