1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
|
#!/bin/sh
# PCP QA Test No. 1684
# Exercise python derived metrics interface.
#
# Copyright (c) 2024 Red Hat. All Rights Reserved.
#

# test number is derived from this script's filename
seq=$(basename "$0")
echo "QA output created by $seq"

# get standard environment, filters and checks for python QA tests
. ./common.python
# Remove everything this test created below $tmp.
# Globals read: here (original working dir), sudo (empty, or "sudo"
# when privilege escalation is needed), tmp (per-test scratch prefix).
_cleanup()
{
    cd "$here"
    # $sudo is deliberately left unquoted: it expands to nothing for root
    $sudo rm -rf "$tmp" "$tmp".*
}
status=0	# success is the default!
# run _cleanup on normal exit (0) and on HUP/INT/QUIT/TERM;
# $status is expanded when the trap fires, not here
trap '_cleanup; exit $status' 0 1 2 3 15

# quoting, brackets and commas is a Python version thing (yuk)
# ('Loading derived metrics from', 'no-such-file')
# ('Failed to load:', 'No such file or directory')
# ('Successfully loaded', 2, 'derived metrics')
#
# Normalize test output: map the scratch directory to TMP, then strip
# the python2-era tuple punctuation from the three known status lines.
_filter()
{
    sed -e "s@$tmp@TMP@g" \
	-e "/Loading derived metrics/s/[('),]//g" \
	-e "/Failed to load/s/[('),]//g" \
	-e "/Successfully loaded/s/[('),]//g"
}
# Build the two derived metric config files used below:
#   $tmp/a - entries that all parse (one names a non-existent metric)
#   $tmp/b - a mix of good and syntactically-bad entries
mkdir "$tmp" || { echo "Arrgh: cannot mkdir $tmp"; status=1; exit; }
echo "x = no-such-metric" >"$tmp/a"
echo "y = sample.lights" >>"$tmp/a"
echo "z = hinv.ncpu + hinv.ndisk" >>"$tmp/a"
echo "a = sample.lights" >"$tmp/b"
echo "b = bozo}" >>"$tmp/b"
echo "c=sample.bin*hinv.ncpu" >>"$tmp/b"
echo "d = +" >>"$tmp/b"

# Don't load any global derived metric configs by default
PCP_DERIVED_CONFIG=""; export PCP_DERIVED_CONFIG

# real QA test starts here
echo "## Missing file - outright failure"
src/test_pcp_derived.py no-such-file >"$tmp/out" 2>&1
[ $? -eq 1 ] || status=1	# expect a failure
_filter <"$tmp/out"
echo

echo "## Valid metrics - outright success"
src/test_pcp_derived.py "$tmp/a" >"$tmp/out" 2>&1
[ $? -eq 0 ] || status=1	# expect success
_filter <"$tmp/out"
echo

echo "## Mix of metrics - partial success"
src/test_pcp_derived.py "$tmp/b" >"$tmp/out" 2>&1
[ $? -eq 0 ] || status=1	# expect success
_filter <"$tmp/out"

# success, all done
exit
|