File: runtests.in

#!/bin/bash

# When running with "-v", the test itself runs in a pipeline with tee, and
# without pipefail we get the exit value from tee instead of from the test.
set -o pipefail

# The linuxcnc starter script sometimes tries to display X windows if
# DISPLAY is set.  We never want that while running tests, so unset it.
unset DISPLAY

# Some of our tests emit locale-sensitive strings, so reset the locale
# to a sane default.
export LC_ALL=C
export LANGUAGES=


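# Work out the directory this script lives in (MYDIR) and the top of the
# source tree (TOPDIR), so everything below can be located relative to them.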
case "$0" in
	*/*) MYDIR="${0%/*}" ;;
	*) MYDIR="`type -path $0`"; MYDIR="${MYDIR%/*}"
esac
MYDIR=$(cd $MYDIR; pwd);
TOPDIR=$(cd $MYDIR/..; pwd)

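# Values of the form @NAME@ are filled in by configure.  For a run-in-place
# build, source the RIP environment; for an installed build, point the test
# environment at the system locations instead.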
prefix=@prefix@
if test @RUN_IN_PLACE@ = yes; then
    . $TOPDIR/scripts/rip-environment >&/dev/null
    export HEADERS=@EMC2_HOME@/include
    export LIBDIR=${TOPDIR}/lib
    export REALTIME=realtime
else
    # Set $EMC2_HOME to $prefix for tests that depend on it
    export SYSTEM_BUILD=1
    export EMC2_HOME=@EMC2_HOME@
    export HEADERS=@includedir@/linuxcnc
    export LIBDIR=@EMC2_HOME@/lib
    export LINUXCNC_EMCSH=@WISH@
    export REALTIME=@REALTIME@
    export SUDO=sudo
fi
export PYTHON_CPPFLAGS="@PYTHON_CPPFLAGS@"
export PYTHON_EXTRA_LIBS="@PYTHON_EXTRA_LIBS@"
export PYTHON_EXTRA_LDFLAGS="@PYTHON_EXTRA_LDFLAGS@"
export PYTHON_LIBS="@PYTHON_EXTRA_LIBS@"

export RUNTESTS="$(readlink -f $0)"

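# Counters for the summary printed at the end of the run.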
NUM=0
FAIL=0; FAIL_NAMES=""
XFAIL=0
SKIP=0
VERBOSE=0

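# Remove the output files left over from previous runs ('result', 'stderr',
# and VAR files) under the given directories.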
clean () {
    find $* \( -name "stderr" -or -name "result" \
	-or -name "*.var" -or -name "*.var.bak" \) \
	-print0 | xargs -0 rm -f
}

wait_for_result_close() {
    # Wait for the 'result' and 'stderr' files in the current test directory
    # to be closed.  Checking the results while either file is still open
    # would be a race condition, so 'checkresult' must not run until both
    # have been released.
    # This function must be called with the test's directory as the CWD.
    presult="$(realpath -e -q "./result")"
    pstderr="$(realpath -e -q "./stderr")"
    if [ -z "$presult" ] || [ -z "$pstderr" ]; then
        echo "Internal error: Missing 'result' or 'stderr' in wait_for_result_close()"
        exit 2
    fi
    timeoutcnt=0
    while true; do
        lsof -- "$presult" > /dev/null 2>&1; resresult=$?
        lsof -- "$pstderr" > /dev/null 2>&1; resstderr=$?
        if [ $resresult -ne 0 ] && [ $resstderr -ne 0 ]; then
            # Neither 'result' nor 'stderr' are open anymore
            break
        fi
        if [ $timeoutcnt -ge 30 ]; then
            if [ $resresult -eq 0 ]; then
                echo "*** Timeout waiting for 'result' file to close"
            fi
            if [ $resstderr -eq 0 ]; then
                echo "*** Timeout waiting for 'stderr' file to close"
            fi
            echo "*** Test results may be invalid when checked."
            return 1
        fi
        sleep 1
        timeoutcnt=$((timeoutcnt + 1))
    done
    return 0
}

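# Run a shell-script test ('test.sh') in its own directory, capturing its
# stdout in 'result' and its stderr in 'stderr', and return the test's exit
# code.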
run_shell_script () {
    testname=$(basename $1)
    testdir=$(dirname $1)

    pushd $testdir > /dev/null
    if [ $VERBOSE -eq 1 ]; then
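        # Swap stdout and stderr around the subshell: the inner tee copies the
        # test's stdout to 'result' while the outer tee copies its stderr to
        # 'stderr', and both streams are still echoed to the terminal.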
        (bash -x $testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
    else
        bash -x $testname > result 2> stderr
    fi
    exitcode=$?
    wait_for_result_close
    popd > /dev/null || exit 2
    return "$exitcode"
}

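# Run an executable test ('test'), capturing its output the same way as
# run_shell_script does.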
run_executable () {
    testname=$(basename $1)
    testdir=$(dirname $1)

    pushd $testdir > /dev/null
    if [ $VERBOSE -eq 1 ]; then
        (./$testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
    else
        ./$testname > result 2> stderr
    fi
    exitcode=$?
    wait_for_result_close
    popd > /dev/null || exit 2
    return "$exitcode"
}

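# Run a HAL test ('test.hal') with halrun.  If the sampler reports an overrun
# the captured data is unreliable, so retry up to 10 times before giving up.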
run_without_overruns () {
    testname=$(basename $1)
    testdir=$(dirname $1)
    for i in $(seq 10); do
        if [ $i != 1 ]; then echo "--- $testdir: overrun detected in sampler, re-running test" 1>&2 ; fi

        pushd $testdir > /dev/null
        if [ $VERBOSE -eq 1 ]; then
            (halrun -f $testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
        else
            halrun -f $testname > result 2> stderr
        fi
        exitcode=$?
        wait_for_result_close
        popd > /dev/null || exit 2

        if ! grep -q '^overrun$' $testdir/result; then return $exitcode; fi
    done
    echo "--- $testdir: $i overruns detected, giving up" 1>&2
    return 1
}

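# Dispatch a single test to the right runner based on its file name.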
run_test() {
    testname=$1
    case $testname in
        *.hal) run_without_overruns $testname ;;
        *.sh) run_shell_script $testname ;;
        *) run_executable $testname ;;
    esac
}

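# Scratch directory for the test list and diff output, removed when the
# script exits or is interrupted.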
TMPDIR=`mktemp -d /tmp/runtest.XXXXXX`
trap "rm -rf $TMPDIR" 0 1 2 3 9 15


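# Find every test under the given directories and run it, honoring 'skip'
# scripts and sudo restrictions.  Each test's 'result' file is checked with
# the test's own 'checkresult' script if present, otherwise compared against
# 'expected'; an 'xfail' file marks the test as an expected failure.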
run_tests () {
    find $@ -name test.hal -or -name test.sh -or -name test \
	| sort > $TMPDIR/alltests

    while read testname; do
	testdir=$(dirname $testname)
	if [ -e $testdir/skip ]; then
	    if ! [ -x $testdir/skip ] || ! $testdir/skip; then
		echo "Skipping disabled test: $testdir" 1>&2
		SKIP=$(($SKIP+1))
		continue
	    fi
	fi
	if $NOSUDO && [ -e $testdir/control ] && \
		grep Restrictions: $testdir/control | grep -q sudo; then
	    if ! [ -x $testdir/skip ] || ! $testdir/skip; then
		echo "Skipping sudo test: $testdir" 1>&2
		SKIP=$(($SKIP+1))
		continue
	    fi
	fi
	NUM=$(($NUM+1))
	export TEST_DIR=$(readlink -f $testdir)
	echo "Running test: $testdir" 1>&2
        if test -n "$SYSTEM_BUILD"; then
            # Tell `halcompile` where to install comps
            USER_MODULE_DIR=$(readlink -f $testdir) \
                PATH=$(readlink -f $testdir):$PATH \
                run_test $testname
        else
            run_test $testname
        fi
	exitcode=$?
	if [ $exitcode -ne 0 ]; then
	    reason="test run exited with $exitcode"
	else
	    if [ -e $testdir/checkresult ]; then
		$testdir/checkresult $testdir/result
		exitcode=$?
		reason="checkresult exited with $exitcode"
	    elif [ -f $testdir/expected ]; then
		cmp -s $testdir/expected $testdir/result
		exitcode=$?
		reason="result differed from expected"
		if [ $exitcode -ne 0 ]; then
		    diff -u $testdir/expected $testdir/result > $TMPDIR/diff
		    SIZE=$(wc -l < $TMPDIR/diff)
		    if [ $SIZE -lt 40 ]; then
			cat $TMPDIR/diff
		    else
			OMIT=$((SIZE-40))
			head -40 $TMPDIR/diff
			echo "($OMIT more lines omitted)"
		    fi
		fi
	    else
		exitcode=1
		reason="Neither expected nor checkresult existed"
	    fi
	fi
	if [ $exitcode -ne 0 ]; then
	    echo "*** $testdir: XFAIL: $reason"
            if test $PRINT = 1; then
                echo "************** result:"
                tail -500 $testdir/result | sed 's/^/        /'
                echo "************** stderr:"
                tail -500 $testdir/stderr | sed 's/^/        /'
                echo "**************"
            fi
	    if [ -f $testdir/xfail ]; then
		XFAIL=$(($XFAIL+1))
		if [ $NOCLEAN -eq 0 ]; then
		    rm -f $testdir/stderr $testdir/result \
			$testdir/*.var $testdir/*.var.bak
		fi
	    else
		FAIL=$(($FAIL+1))
		FAIL_NAMES="$FAIL_NAMES
	$testdir"
	    fi
            if test $STOP = 1; then
	        break
	    fi
	else
	    if [ -f $testdir/xfail ]; then
		echo "*** $testdir: XPASS: Passed, but was expected to fail"
	    else
		if [ $NOCLEAN -eq 0 ]; then
		    rm -f $testdir/stderr $testdir/result \
			$testdir/*.var $testdir/*.var.bak
		fi
	    fi
	fi
    done < $TMPDIR/alltests

    SUCC=$((NUM-FAIL-XFAIL))
    echo "Runtest: $NUM tests run, $SUCC successful, $FAIL failed + $XFAIL expected, $SKIP skipped"
    if [ $FAIL -ne 0 ]; then
	echo "Failed: $FAIL_NAMES"
	exit 1;
    else
	exit 0;
    fi
}

usage () {
    P=${0##*/}
    cat <<EOF
$P: Run HAL test suite items

Usage:
    $P [-n] [-s] [-p] tests
	Run tests.  With '-n', do not remove temporary files for successful
	tests.  With '-s', stop after any failed test.  With '-p', print
        stderr and result files.

    $P -c tests
	Remove temporary files from an earlier test run.

    $P -u
        Only run tests that require normal user access.  Skip tests
        requiring root or sudo.

    $P -v
        Show stdout and stderr while the tests run (normally they are hidden).
EOF
}

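# Option defaults, overridden by the getopts loop below.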
CLEAN_ONLY=0
NOCLEAN=0
NOSUDO=false
STOP=0
PRINT=0
while getopts cnuvsph opt; do
    case "$opt" in
    c) CLEAN_ONLY=1 ;;
    n) NOCLEAN=1 ;;
    u) NOSUDO=true ;;
    v) VERBOSE=1 ;;
    s) STOP=1 ;;
    p) PRINT=1 ;;
    h|?) usage; exit 0 ;;
    *) usage; exit 1 ;;
    esac
done
shift $((OPTIND-1))

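# With no arguments, run the test in the current directory if there is one,
# otherwise run the whole tests/ tree.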
if [ $# -eq 0 ]; then
    if [ -f test.hal -o -f test.sh ]; then
        set -- .
    else
        set -- $TOPDIR/tests
    fi
fi

if [ $CLEAN_ONLY -eq 1 ]; then
    clean "$@"
else
    run_tests "$@"
fi