File: test_worker.sh

#!/bin/sh
set -u

. "$(dirname "$0")/test_helper.sh"

# let's mess with a separate queue just for this test
export debci_amqp_queue="${debci_amqp_queue}-stress"

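# request PKG: enqueue a test request for package PKG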
request() {
  debci enqueue "$1"
}

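# remember the PIDs of debci-related processes that were already running
# before this test, so that settle_processes can ignore them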
existing_processes=$(ps hx -o pid,comm |
  egrep "(debci|test-package|autopkgtest|amqp-consume)" |
  ruby -e 'puts STDIN.readlines.map{ |l| l.split.first }.to_a.join("|")')

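# settle_processes: wait (up to 600 polls of 0.1s, roughly a minute) for all
# debci-related processes spawned by this test to exit, then assert that none
# are left over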
settle_processes() {
  local timeout=600
  while [ $timeout -gt 0 ]; do
    PS=$(ps hx -o pid,comm |
      egrep -v "^\s*($existing_processes)\b" |
      egrep "(debci|test-package|autopkgtest|amqp-consume)" |
      sort -u)
    [ -n "$PS" ] || break
    timeout=$((timeout - 1))
    sleep 0.1
  done
  assertEquals "unexpected leftover processes" "" "$PS"
}

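# run_mypkg: start a worker and a collector, enqueue a single request for
# mypkg, and check that the worker survives the run (it is only expected to
# die when the fake crash target is amqp-consume); finally record the result
# directory in RESULT_DIR for the callers' assertions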
run_mypkg() {
  start_worker
  debci amqp declare-queue
  start_collector
  request mypkg
  # give the worker some time to process the request; keep it generous for slow systems
  sleep 2
  if [ "${DEBCI_FAKE_KILLPARENT:-x}" = "amqp-consume" ]; then
    [ ! -e /proc/$TEST_WORKER_PID ] || fail "test worker unexpectedly survived"
  else
    [ -e /proc/$TEST_WORKER_PID ] || fail "test worker unexpectedly died"
  fi
  stop_worker
  stop_collector
  settle_processes
  RESULT_DIR=$(autopkgtest_dir_for_package mypkg)
}

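# without any injected crash, every request must be consumed (empty queue) and
# produce exactly one result log, whether the test passes, fails, or tmpfails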
test_no_crash_success() {
  unset DEBCI_FAKE_KILLPARENT
  result_pass run_mypkg
  assertEquals "has leftover requests" "0" $(clean_queue)
  # we should have one log
  assertEquals 1 "$(ls $RESULT_DIR/*/log.gz | wc -l)"
}

test_no_crash_fail() {
  unset DEBCI_FAKE_KILLPARENT
  result_fail run_mypkg
  assertEquals "has leftover requests" "0" "$(clean_queue)"
  # we should have one log
  assertEquals 1 "$(ls $RESULT_DIR/*/log.gz | wc -l)"
}

test_no_crash_tmpfail() {
  unset DEBCI_FAKE_KILLPARENT
  result_tmpfail run_mypkg
  assertEquals "has leftover requests" "0" "$(clean_queue)"
  # we should have one log
  assertEquals 1 "$(ls $RESULT_DIR/*/log.gz | wc -l)"
}

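# crash tests: forcibly kill the named process in the middle of a run; the
# aborted request must stay in the queue and no result may be recorded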
test_crash_test_package() {
  export DEBCI_FAKE_KILLPARENT="test-package"
  result_pass run_mypkg
  assertEquals "aborted request got lost" "1" "$(clean_queue)"
  # no result was ever received
  assertEquals 0 "$(ls $RESULT_DIR/*/log.gz 2>/dev/null | wc -l)"
}

test_crash_debci_test() {
  export DEBCI_FAKE_KILLPARENT="debci-test"
  result_pass run_mypkg
  assertEquals "aborted request got lost" "1" "$(clean_queue)"
  assertFalse "has unexpected result dir" "[ -e \"$RESULT_DIR\" ]"
}

test_crash_worker() {
  export DEBCI_FAKE_KILLPARENT="debci-worker"
  run_mypkg
  assertEquals "aborted request got lost" "1" "$(clean_queue)"
  # there should be no logs
  assertFalse "has unexpected result dir" "[ -e \"$RESULT_DIR\" ]"
}

# generate lots of test requests, start lots of workers, and then go around and
# crash two thirds of them; ensure that we get all results
NUM_REQUESTS=100
NUM_WORKERS=30
test_smoke() {
  unset DEBCI_FAKE_KILLPARENT

  start_rabbitmq_server
  debci amqp declare-queue

  local WORKERS=''
  for i in $(seq $NUM_WORKERS); do
    debci worker &
    WORKERS="$WORKERS $!"
  done
  debci publisher &
  PUBLISHER=$!
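  # give the workers and the publisher a moment to connect before enqueueing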
  sleep 0.3

  for i in $(seq $NUM_REQUESTS); do
    request pkg$i
  done

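  # SIGKILL two out of every three workers to simulate crashes mid-run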
  local i=0
  for w in $WORKERS; do
    i=$(( (i + 1) % 3))
    if [ $i -ne 0 ]; then
      kill -kill $w
    fi
  done

  start_collector

  # wait until all requests have been consumed; unfortunately we have no shell
  # tool (except rabbitmqctl list_queues, which needs root) to show the queue
  # status, so we poll for all packages being handled
  local timeout=600
  local completed=0
  while [ $completed -lt $NUM_REQUESTS ] && [ $timeout -gt 0 ]; do
    sleep 0.1
    timeout=$((timeout - 1))
    while [ $(find $debci_data_basedir/autopkgtest/unstable/$debci_arch/p/pkg$(($completed + 1)) \
              -name log.gz 2>/dev/null | wc -l) -gt 0 ]; do
      completed=$(($completed + 1))
    done
  done
  if [ $timeout -eq 0 ]; then
    echo "TIMED OUT"
  fi
  assertEquals "has leftover requests" "0" "$(clean_queue)"

  # clean up the remaining ones
  for w in $WORKERS; do
    kill $w 2>/dev/null && wait $w || true
  done
  kill $PUBLISHER 2>/dev/null && wait $PUBLISHER || true
  stop_collector
  settle_processes

  # some tests get restarted, so we expect one or two logs
  for i in $(seq $NUM_REQUESTS); do
    local d=$(autopkgtest_dir_for_package pkg$i)
    local nlogs=$(ls "$d"/*/log.gz | wc -l)
    assertTrue "one or two logs for pkg$i" "[ $nlogs -eq 1 -o $nlogs -eq 2 ]"
  done
}

. shunit2