File: inode-leak.t

#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc

cleanup

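# Bring up glusterd, create and start a 2x2 distributed-replicate volume,
# then mount it over FUSE.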
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
TEST $CLI volume start $V0
TEST glusterfs -s $H0 --volfile-id $V0 $M0

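# Baseline: right after the mount, only the root inode should be active
# and the lru list should be empty.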
EXPECT "1" get_mount_active_size_value $V0 $M0
EXPECT "0" get_mount_lru_size_value $V0 $M0

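# Populate the mount with a directory tree, walk it, and delete it to
# churn the client-side inode table.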
TEST cp -rf /etc $M0
TEST find $M0
TEST rm -rf $M0/*

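# The inode table should return to its baseline; any extra active or lru
# entries would indicate an inode leak.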
EXPECT "1" get_mount_active_size_value $V0 $M0
EXPECT "0" get_mount_lru_size_value $V0 $M0

cleanup

# Marked as a known issue mainly because it takes a *lot* of time.
# Revert once regression runs are back under an hour,
# or consider running it only in nightly regressions.

#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=000000