File: afr-lock-heal-basic.t

#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup;

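# Prints "Y" if the process with the given pid is still running, "N" otherwise.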
function is_gfapi_program_alive()
{
        pid=$1
        ps -p $pid
        if [ $? -eq 0 ]
        then
                echo "Y"
        else
                echo "N"
        fi
}

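# Greps the brick's statedump for an ACTIVE lock entry on $inode that matches
# this brick's client-<N> name and prints a normalized one-line summary of it.
# Returns 1 when no such lock is found, so TEST_WITHIN can retry until the lock
# shows up (or the timeout expires).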
function fill_lock_info()
{
    local info
    local brick=$1
    pattern="ACTIVE.*client-${brick: -1}"

    brick_sdump=$(generate_brick_statedump $V0 $H0 $brick)
    info="$(egrep "$inode" $brick_sdump -A3| egrep "$pattern" | uniq | awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"

    echo "${info}"

    if [ -n "$info" ]
    then
        return 0
    else
        return 1
    fi
}

TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;

TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 locks.mandatory-locking forced
TEST $CLI volume set $V0 enforce-mandatory-lock on
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';

logdir=`gluster --print-logdir`
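# Compile the gfapi client used by this test; the resulting binary is expected
# next to the .c file and is removed at the end via cleanup_tester.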
TEST build_tester $(dirname $0)/afr-lock-heal-basic.c -lgfapi -ggdb

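# Assumed flow of afr-lock-heal-basic.c, based on how it is driven below: it
# opens /FILE through gfapi, blocks until the first SIGUSR1, takes a mandatory
# lock with glfs_file_lock(), then blocks until a second SIGUSR1 before
# exiting. See the .c file for the authoritative sequence.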
$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C1&
client1_pid=$!
TEST [ $client1_pid ]

$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C2&
client2_pid=$!
TEST [ $client2_pid ]

TEST sleep 5 # By now, the 2 clients would have opened an fd on FILE and be waiting for a SIGUSR1.
EXPECT "Y" is_gfapi_program_alive $client1_pid
EXPECT "Y" is_gfapi_program_alive $client2_pid

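# Egrep pattern that locates FILE's entries in the brick statedumps, either by
# name or by gfid.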
gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE))
inode="FILE|gfid:$gfid_str"

# Kill brick-3 and let client-1 take lock on the file.
TEST kill_brick $V0 $H0 $B0/${V0}2
TEST kill -SIGUSR1 $client1_pid
# If program is still alive, glfs_file_lock() was a success.
EXPECT "Y" is_gfapi_program_alive $client1_pid

# Check lock is present on brick-1 and brick-2
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}0
c1_lock_on_b1="${TEST_OUTPUT}"
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}1
c1_lock_on_b2="${TEST_OUTPUT}"
TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b2" ]

# Restart brick-3 and check that the lock has healed on it.
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2

# Note: We need to wait for the client to re-open the fd; otherwise client_pre_lk_v2() fails with EBADFD for the remote fd.
# We also need to wait for lock heal, so we may have to check the statedump for locks multiple times.
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}2
c1_lock_on_b3="${TEST_OUTPUT}"
TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b3" ]

# Kill brick-1 and let client-2 preempt the lock on bricks 2 and 3.
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill -SIGUSR1 $client2_pid
# If program is still alive, glfs_file_lock() was a success.
EXPECT "Y" is_gfapi_program_alive $client2_pid

# Restart brick-1 and let lock healing complete.
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0

# Check that all bricks now have locks from client 2 only.
# Note: We need to wait for the client to re-open the fd; otherwise client_pre_lk_v2() fails with EBADFD for the remote fd.
# We also need to wait for lock heal, so we may have to check the statedump for locks multiple times.
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}0
c2_lock_on_b1="${TEST_OUTPUT}"
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}1
c2_lock_on_b2="${TEST_OUTPUT}"
TEST_WITHIN $PROCESS_UP_TIMEOUT fill_lock_info $B0/${V0}2
c2_lock_on_b3="${TEST_OUTPUT}"
TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b2" ]
TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b3" ]
TEST [ "$c2_lock_on_b1" != "$c1_lock_on_b1" ]

# Let the client programs run and exit.
TEST kill -SIGUSR1 $client1_pid
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client1_pid
TEST kill -SIGUSR1 $client2_pid
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client2_pid

cleanup_tester $(dirname $0)/afr-lock-heal-basic
cleanup;