# Copyright 2015-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Commandline argument parsing for our test runner.
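
A rough sketch of how a test runner might drive this module (illustrative
only; the 'test.arguments' module path is an assumption, not taken from this
file)::

  import sys
  import test.arguments

  try:
    args = test.arguments.parse(sys.argv[1:])
  except ValueError as exc:
    print(exc)
    sys.exit(1)

  if args.print_help:
    print(test.arguments.get_help())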
"""
import collections
import getopt
import stem.util.conf
import stem.util.log
import test
LOG_TYPE_ERROR = """\
'%s' isn't a logging runlevel, use one of the following instead:
TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
"""

CONFIG = stem.util.conf.config_dict('test', {
  'msg.help': '',
  'target.description': {},
  'target.torrc': {},
})

DEFAULT_ARGS = {
  'run_unit': False,
  'run_integ': False,
  'specific_test': [],
  'logging_runlevel': None,
  'logging_path': None,
  'tor_path': 'tor',
  'run_targets': [test.Target.RUN_OPEN],
  'attribute_targets': [],
  'quiet': False,
  'verbose': False,
  'print_help': False,
}
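
# Note on the getopt strings below: a trailing ':' in OPT and a trailing '='
# in OPT_EXPANDED mark options that expect a value (for example '-l DEBUG' or
# '--tor /path/to/tor'). This is standard getopt behavior rather than anything
# specific to this module.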
OPT = 'auit:l:qvh'
OPT_EXPANDED = ['all', 'unit', 'integ', 'targets=', 'test=', 'log=', 'log-file=', 'tor=', 'quiet', 'verbose', 'help']
def parse(argv):
"""
Parses our arguments, providing a named tuple with their values.
:param list argv: input arguments to be parsed
:returns: a **named tuple** with our parsed arguments
:raises: **ValueError** if we got an invalid argument
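
  For example (an illustrative sketch, not an exhaustive listing of the
  available attributes)::

    args = parse(['--unit', '--log', 'INFO'])

    args.run_unit          # True
    args.logging_runlevel  # 'INFO'
    args.run_targets       # [test.Target.RUN_OPEN] (the default)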
"""
args = dict(DEFAULT_ARGS)
try:
recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)
if unrecognized_args:
error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
except Exception as exc:
raise ValueError('%s (for usage provide --help)' % exc)
for opt, arg in recognized_args:
if opt in ('-a', '--all'):
args['run_unit'] = True
args['run_integ'] = True
elif opt in ('-u', '--unit'):
args['run_unit'] = True
elif opt in ('-i', '--integ'):
args['run_integ'] = True
elif opt in ('-t', '--targets'):
run_targets, attribute_targets = [], []
integ_targets = arg.split(',')
all_run_targets = [t for t in test.Target if CONFIG['target.torrc'].get(t) is not None]

      # validate the targets and split them into run and attribute targets

      if not integ_targets:
        raise ValueError('No targets provided')

      for target in integ_targets:
        if target not in test.Target:
          raise ValueError('Invalid integration target: %s' % target)
        elif target in all_run_targets:
          run_targets.append(target)
        else:
          attribute_targets.append(target)

      # check if we were told to use all run targets

      if test.Target.RUN_ALL in attribute_targets:
        attribute_targets.remove(test.Target.RUN_ALL)
        run_targets = all_run_targets

      # if no RUN_* targets are provided then keep the default (otherwise we
      # won't have any tests to run)

      if run_targets:
        args['run_targets'] = run_targets

      args['attribute_targets'] = attribute_targets
    elif opt == '--test':
      args['specific_test'].append(crop_module_name(arg))
    elif opt in ('-l', '--log'):
      arg = arg.upper()

      if arg not in stem.util.log.LOG_VALUES:
        raise ValueError(LOG_TYPE_ERROR % arg)

      args['logging_runlevel'] = arg
    elif opt == '--log-file':
      args['logging_path'] = arg
    elif opt == '--tor':
      args['tor_path'] = arg
    elif opt in ('-q', '--quiet'):
      args['quiet'] = True
    elif opt in ('-v', '--verbose'):
      args['verbose'] = True
    elif opt in ('-h', '--help'):
      args['print_help'] = True

  # translates our args dict into a named tuple

  Args = collections.namedtuple('Args', args.keys())

  return Args(**args)
def get_help():
"""
Provides usage information, as provided by the '--help' argument. This
includes a listing of the valid integration targets.
:returns: **str** with our usage information
"""
help_msg = CONFIG['msg.help']
# gets the longest target length so we can show the entries in columns
target_name_length = max(map(len, test.Target))
description_format = '\n %%-%is - %%s' % target_name_length
for target in test.Target:
help_msg += description_format % (target, CONFIG['target.description'].get(target, ''))
help_msg += '\n'
return help_msg
def crop_module_name(name):
"""
Test modules have a 'test.unit.' or 'test.integ.' prefix which can
be omitted from our '--test' argument. Cropping this so we can do
normalized comparisons.
:param str name: module name to crop
:returns: **str** with the cropped module name
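
  For example (illustrative)::

    crop_module_name('test.unit.util.conf')  # 'util.conf'
    crop_module_name('util.conf')            # 'util.conf' (unchanged)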
"""
if name.startswith('test.unit.'):
return name[10:]
elif name.startswith('test.integ.'):
return name[11:]
else:
return name