File: runregression

package info (click to toggle)
buddy 2.4+dfsg-1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 2,404 kB
  • sloc: sh: 8,261; ansic: 6,740; cpp: 2,009; makefile: 136; csh: 61
file content (72 lines) | stat: -rwxr-xr-x 2,039 bytes parent folder | download | duplicates (8)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
#!/bin/tcsh -f

# This script runs the regression suite for the BuDDy package.
# 
# Methodology
# -----------
# This script is responsible for running the tests, comparing them against
# the expected results, and printing a report. Each test is run from its own
# directory.
# Each test directory is self contained, and should contain:
#
# + 'runtest' script which will create a file with the results - 'result' 
#   This result file will be used by runregression to be compared against 
#   the expected result.
#
# + 'expected' - the expected result.
#   It is the responsibility of each runtest script to produce the result
#   file clean of any unnecessary text which might cause false alarms.
#   Examples : garbage collection messages, time / date messages, etc...
#   Be careful not to filter out important data !!
#
# + Executable to run. This executable will be run by runtest script of
#   each test. 

# Names of the per-test files/scripts (shared with the main loop below).
set RESULT_FILE = result        # produced by each test's runtest script
set EXPECTED_FILE = expected    # golden output checked into each test dir
set RUNTEST = runtest           # per-directory test driver script

# Counters for the final summary report.
@ tests_to_run = 0
@ tests_passed = 0

# With no arguments, consider every entry in the current directory
# (non-test entries are filtered out by the -d/-e checks in the loop);
# otherwise run only the directories named on the command line.
if ( $#argv == 0 ) then
    # Use a glob rather than parsing `ls` output; nonomatch keeps an
    # empty directory from aborting the whole script with "No match".
    set nonomatch
    set testdirs = ( * )
else
    set testdirs = ( $argv[1-] )
endif

foreach testdir ( $testdirs )
    # Only descend into directories that actually contain a test driver;
    # quote expansions so names with glob characters do not expand.
    if ( -d "$testdir" && -e "$testdir/$RUNTEST" ) then
	cd "$testdir"
	@ tests_to_run ++
	
	# Remove any stale result so we never compare against an old run
	if ( -e $RESULT_FILE ) rm $RESULT_FILE
	
	# Run the test!
	echo
	echo "Running test $testdir ( $tests_to_run ) ..."
	./$RUNTEST
	
	# Let's see if we have got result
	if ( -e $RESULT_FILE ) then 

	    if ( -e $EXPECTED_FILE ) then
		# Compare against the golden output; diff exits non-zero
		# when the files differ (all its output is discarded).
		diff $RESULT_FILE $EXPECTED_FILE >& /dev/null
		if ( $status ) then # there is a diff between files
		    echo "$testdir FAILED : diff found"
		else # no diff
		    echo "$testdir PASSED"
		    @ tests_passed++
		endif
	    else
		# A missing golden file is a broken test setup, not a
		# genuine diff - report it distinctly.
		echo "$testdir FAILED : no $EXPECTED_FILE file"
	    endif

	else # no results - test failed
	    echo "$testdir FAILED : no $RESULT_FILE created"
	endif
	if ( -e $RESULT_FILE ) rm $RESULT_FILE # clean what we have done
	cd ..
    endif
end

# Final summary: how many tests were attempted and how many passed.
echo ""
echo "Total tests to run : $tests_to_run"
echo "Passed tests       : $tests_passed"