File: run.py

Package: bowtie2 2.5.4-1
#!/usr/bin/python3
"""
   Runs benchmark sets specified in JSON files.

Examples:
   # Runs the test set specified in simple.json and
   # records this run under the id "test".

   run.py -t simple.json -i test

   # Runs the same test set using the bowtie2 binaries from
   # the directory ~/bowtie-devel.

   run.py -t simple.json -i test -b ~/bowtie-devel

   # Runs all *.json benchmark test sets from ~/bt2_benchmarks,
   # recording them under the output directory "records/all_tests".

   run.py -s ~/bt2_benchmarks -i all_tests -o records

"""

import os
import sys
import logging
import benchmarks as bm
from optparse import OptionParser


def parse_args():
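    """Parse command-line options; --benchmark-id is mandatory."""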
    usage = " %prog [options] \n\n"
    usage += "Runs the specified bowtie2 benchmarking tests.\n"
    usage += "Benchmarks are defined in JSON test set files. Each test set\n"
    usage += "defines the required input data, the commands used to generate\n"
    usage += "the data if it is missing, the command to benchmark and the\n"
    usage += "metrics to record."
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--benchmark-test",
                      action="store", type="string", dest="benchmark_test", default=None,
                      help="One benchmark tests to run. Tests are files in JSON "
                           "format describing a set of test, input data requirements and what "
                           "metrics to record.")
    parser.add_option("-s", "--benchmarks-dir",
                      action="store", type="string", dest="benchmarks_dir", default=None,
                      help="A path to a directory with benchmark tests to run. All files "
                           "with .json extension will be loaded and run."
                           "metrics to record.")
    parser.add_option("-i", "--benchmark-id",
                      action="store", type="string", dest="benchmark_id", default=None,
                      help="(mandatory).What name/id to use for this recording.")
    parser.add_option("-d", "--download-dir",
                      action="store", type="string", dest="download_dir", default='download',
                      help=" (Default: ./download).The directory path where all input "
                           "data used by the benchmarks should be stored.")
    parser.add_option("-b", "--bowtie-dir",
                      action="store", type="string", dest="bowtie_dir", default='bowtie2',
                      help="(Default: ./bowtie2).Bowtie directory.")
    parser.add_option("-o", "--output-dir",
                      action="store", type="string", dest="out_dir", default='benchmark_rezults',
                      help="(Default: ./out). Directory where tests results are going to "
                           "be written.")
    parser.add_option("-l", "--log-file",
                      action="store", type="string", dest="log_fname", default=None,
                      help="(Default: stderr). Log file name if desired.")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Print more info about each step.")
    parser.add_option("-u", "--debug",
                      action="store_true", dest="debug", default=False,
                      help="Print extra debug info. Used only for diagnosing purpose.")

    (options, args) = parser.parse_args()

    if options.benchmark_id is None:
        print("Mandatory option is missing (--benchmark-id)")
        parser.print_help()
        exit(-1)

    return options


if __name__ == "__main__":
    options = parse_args()

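    # Log to the requested file, or to stderr by default; start at ERROR level.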
    if options.log_fname is not None:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            filename=options.log_fname,
                            level=logging.ERROR)
    else:
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.ERROR)

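    # -v/--verbose raises the log level to INFO; -u/--debug raises it further to DEBUG.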
    if options.verbose:
        logging.getLogger().setLevel(level=logging.INFO)

    if options.debug:
        logging.getLogger().setLevel(level=logging.DEBUG)

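    # Create the output and download directories if they do not already exist.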
    if not os.path.exists(options.out_dir):
        logging.debug("Creating output dir: %s" % options.out_dir)
        os.mkdir(options.out_dir)

    if not os.path.exists(options.download_dir):
        logging.debug("Creating download dir %s" % options.download_dir)
        os.mkdir(options.download_dir)

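    # If a benchmarks directory was given with -s, it must exist.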
    if options.benchmarks_dir is not None:
        if not os.path.exists(options.benchmarks_dir):
            logging.error("Cannot find benchmark directory %s" % options.benchmarks_dir)

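    # Build the set of benchmarks to run from the given test file (-t) and/or directory (-s).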
    batch_benchmarks = bm.Benchmarks(benchmarks_dir=options.benchmarks_dir,
                                     benchmark_test=options.benchmark_test,
                                     data_dir=options.download_dir,
                                     output_dir=options.out_dir,
                                     bin_dir=options.bowtie_dir,
                                     benchmark_id=options.benchmark_id)

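    # For each test set, make sure its input data is available (loading it if needed), then run it.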
    for test_set in batch_benchmarks:
        if not test_set.input_data_loaded:
            test_set.load()
        test_set.run()